/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone. Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones. ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory. The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section. When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation. In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus. Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *      Alloc Size      Chunking        Number of zones
 *      0-127           8               16
 *      128-255         16              8
 *      256-511         32              8
 *      512-1023        64              8
 *      1024-2047       128             8
 *      2048-4095       256             8
 *      4096-8191       512             8
 *      8192-16383      1024            8
 *      16384-32767     2048            8
 *      (if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 * Allocations >= ZoneLimit go directly to kmem.
 *
 * Alignment properties:
 * - All power-of-2 sized allocations are power-of-2 aligned.
 * - Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *   power-of-2 rounding up of 'size'.
 * - Non-power-of-2 sized allocations are zone chunk size aligned (see the
 *   'Chunking' column of the table above).
 *
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 * To operate as a drop-in replacement for the FreeBSD-4.x malloc() we
 * have remained compatible with the following API requirements:
 *
 * + malloc(0) is allowed and returns non-NULL (ahc driver)
 * + ability to allocate arbitrarily large chunks of memory
 */
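
/*
 * Typical caller-side usage, shown as a minimal sketch (M_EXAMPLE is a
 * hypothetical malloc type used only for illustration, not one defined
 * in this file):
 *
 *      MALLOC_DEFINE(M_EXAMPLE, "example", "example allocations");
 *
 *      void *p = kmalloc(1024, M_EXAMPLE, M_WAITOK | M_ZERO);
 *      ...
 *      kfree(p, M_EXAMPLE);
 *
 * M_WAITOK allows the allocation to block (and thus only returns NULL
 * when M_NULLOK is also given), while M_ZERO returns zeroed memory.
 */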

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

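/*
 * btokup() maps a kernel virtual address to its per-page bookkeeping slot
 * (ku_pagecnt): > 0 for oversized allocations (the page count), negative
 * for slab zones (-(cpu + 1)), and 0 when the backing kva is free.
 */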
#define btokup(z)       (&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING   "ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS     void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY      KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags) \
        KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name) \
        KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;                /* in KB */
static int ZoneGenAlloc;                /* in KB */
struct malloc_type *kmemstatistics;     /* exported to vmstat */
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants. Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH        32      /* threshold number of zones */

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR      0xdeadc0de
#define MAX_COPY        sizeof(weirdary)
#define ZERO_LENGTH_PTR ((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator. We have to choose a zone size based
 * on available physical memory. We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K. The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
           &use_malloc_pattern, 0,
           "Initialize memory to -1 if M_ZERO not specified");
#endif

static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
static long SlabsAllocated;
static long SlabsFreed;
SYSCTL_LONG(_kern, OID_AUTO, slabs_allocated, CTLFLAG_RD, &SlabsAllocated, 0, "");
SYSCTL_LONG(_kern, OID_AUTO, slabs_freed, CTLFLAG_RD, &SlabsFreed, 0, "");

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches. The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
        size_t limsize;

        limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
        if (limsize > KvaSize)
                limsize = KvaSize;
        return (limsize / (1024 * 1024));
}
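
/*
 * Example: with 4GB of ram (v_page_count * PAGE_SIZE == 4G) and a larger
 * KVM space, kmem_lim_size() returns 4096 (MB); when KvaSize is smaller
 * than physical memory it bounds the result instead.
 */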

static void
kmeminit(void *dummy)
{
        size_t limsize;
        int usesize;
        int i;

        limsize = kmem_lim_size();
        usesize = (int)(limsize * 1024);        /* convert to KB */

        /*
         * If the machine has a large KVM space and more than 8G of ram,
         * double the zone release threshold to reduce SMP invalidations.
         * If more than 16G of ram, do it again.
         *
         * The BIOS eats a little ram so add some slop. We want 8G worth of
         * memory sticks to trigger the first adjustment.
         */
        if (ZoneRelsThresh == ZONE_RELS_THRESH) {
                if (limsize >= 7 * 1024)
                        ZoneRelsThresh *= 2;
                if (limsize >= 15 * 1024)
                        ZoneRelsThresh *= 2;
        }

        /*
         * Calculate the zone size. This typically calculates to
         * ZALLOC_MAX_ZONE_SIZE
         */
        ZoneSize = ZALLOC_MIN_ZONE_SIZE;
        while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
                ZoneSize <<= 1;
        ZoneLimit = ZoneSize / 4;
        if (ZoneLimit > ZALLOC_ZONE_LIMIT)
                ZoneLimit = ZALLOC_ZONE_LIMIT;
        ZoneMask = ~(uintptr_t)(ZoneSize - 1);
        ZonePageCount = ZoneSize / PAGE_SIZE;

        for (i = 0; i < NELEM(weirdary); ++i)
                weirdary[i] = WEIRD_ADDR;

        ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

        if (bootverbose)
                kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}
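
/*
 * A worked example of the sizing above, assuming the typical slaballoc.h
 * bounds (32K min, 128K max): with 256MB of ram, usesize is 262144 (KB),
 * ZoneSize doubles from 32K up to 128K (ZALLOC_MAX_ZONE_SIZE), and
 * ZoneLimit starts at ZoneSize / 4 = 32K before being capped by
 * ZALLOC_ZONE_LIMIT.
 */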

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
        struct malloc_type *type = data;
        size_t limsize;

        if (type->ks_magic != M_MAGIC)
                panic("malloc type lacks magic");

        if (type->ks_limit != 0)
                return;

        if (vmstats.v_page_count == 0)
                panic("malloc_init not allowed before vm init");

        limsize = kmem_lim_size() * (1024 * 1024);
        type->ks_limit = limsize / 10;

        type->ks_next = kmemstatistics;
        kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
        struct malloc_type *type = data;
        struct malloc_type *t;
#ifdef INVARIANTS
        int i;
        long ttl;
#endif

        if (type->ks_magic != M_MAGIC)
                panic("malloc type lacks magic");

        if (vmstats.v_page_count == 0)
                panic("malloc_uninit not allowed before vm init");

        if (type->ks_limit == 0)
                panic("malloc_uninit on uninitialized type");

        /* Make sure that all pending kfree()s are finished. */
        lwkt_synchronize_ipiqs("muninit");

#ifdef INVARIANTS
        /*
         * memuse is only correct in aggregation. Due to memory being
         * allocated on one cpu and freed on another, individual array
         * entries may be negative or positive (canceling each other out).
         */
        for (i = ttl = 0; i < ncpus; ++i)
                ttl += type->ks_memuse[i];
        if (ttl) {
                kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
                        ttl, type->ks_shortdesc, i);
        }
#endif
        if (type == kmemstatistics) {
                kmemstatistics = type->ks_next;
        } else {
                for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
                        if (t->ks_next == type) {
                                t->ks_next = type->ks_next;
                                break;
                        }
                }
        }
        type->ks_next = NULL;
        type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool. No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
        if (type->ks_limit == 0)
                malloc_init(type);
        if (bytes == 0)
                bytes = KvaSize;
        if (type->ks_limit < bytes)
                type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool. This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
        struct malloc_type *type;

        if (*typep == NULL) {
                type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
                type->ks_magic = M_MAGIC;
                type->ks_shortdesc = descr;
                malloc_init(type);
                *typep = type;
        }
}

/*
 * Destroy a dynamically created malloc pool. This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
        if (*typep != NULL) {
                malloc_uninit(*typep);
                kfree(*typep, M_TEMP);
                *typep = NULL;
        }
}
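
/*
 * Typical usage of the dynamic pool API, as a minimal sketch (mod_pool is
 * a hypothetical caller-owned variable):
 *
 *      static struct malloc_type *mod_pool;
 *
 *      kmalloc_create(&mod_pool, "my module data");
 *      p = kmalloc(size, mod_pool, M_WAITOK);
 *      ...
 *      kfree(p, mod_pool);
 *      kmalloc_destroy(&mod_pool);
 *
 * Both calls are idempotent: create is a NOP once *typep is non-NULL and
 * destroy is a NOP once it is NULL again.
 */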

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes, unsigned long *align)
{
        unsigned int n = (unsigned int)*bytes;  /* unsigned for shift opt */

        if (n < 128) {
                *bytes = n = (n + 7) & ~7;
                *align = 8;
                return(n / 8 - 1);              /* 8 byte chunks, 16 zones */
        }
        if (n < 256) {
                *bytes = n = (n + 15) & ~15;
                *align = 16;
                return(n / 16 + 7);
        }
        if (n < 8192) {
                if (n < 512) {
                        *bytes = n = (n + 31) & ~31;
                        *align = 32;
                        return(n / 32 + 15);
                }
                if (n < 1024) {
                        *bytes = n = (n + 63) & ~63;
                        *align = 64;
                        return(n / 64 + 23);
                }
                if (n < 2048) {
                        *bytes = n = (n + 127) & ~127;
                        *align = 128;
                        return(n / 128 + 31);
                }
                if (n < 4096) {
                        *bytes = n = (n + 255) & ~255;
                        *align = 256;
                        return(n / 256 + 39);
                }
                *bytes = n = (n + 511) & ~511;
                *align = 512;
                return(n / 512 + 47);
        }
#if ZALLOC_ZONE_LIMIT > 8192
        if (n < 16384) {
                *bytes = n = (n + 1023) & ~1023;
                *align = 1024;
                return(n / 1024 + 55);
        }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
        if (n < 32768) {
                *bytes = n = (n + 2047) & ~2047;
                *align = 2048;
                return(n / 2048 + 63);
        }
#endif
        panic("Unexpected byte count %d", n);
        return(0);
}
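
/*
 * Worked example: zoneindex(&size, &align) with size = 100 rounds the
 * request up to the 104-byte chunk ((100 + 7) & ~7), sets align = 8, and
 * returns zone index 104 / 8 - 1 == 12, so a 100-byte kmalloc() consumes
 * a 104-byte chunk.
 */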

/*
 * Atomically pull the remote-free list (z_RChunks) off the zone and splice
 * it onto the tail of the local free list (z_LChunks), adjusting z_NFree
 * for each chunk moved. This competes with remote cpus pushing new chunks;
 * if the cmpset fails we simply retry.
 */
static __inline
void
clean_zone_rchunks(SLZone *z)
{
        SLChunk *bchunk;

        while ((bchunk = z->z_RChunks) != NULL) {
                cpu_ccfence();
                if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
                        *z->z_LChunksp = bchunk;
                        while (bchunk) {
                                chunk_mark_free(z, bchunk);
                                z->z_LChunksp = &bchunk->c_Next;
                                bchunk = bchunk->c_Next;
                                ++z->z_NFree;
                        }
                        break;
                }
                /* retry */
        }
}

/*
 * If the zone becomes totally free, and there are other zones we
 * can allocate from, move this zone to the FreeZones list. Since
 * this code can be called from an IPI callback, do *NOT* try to mess
 * with kernel_map here. Hysteresis will be performed at malloc() time.
 */
static __inline
SLZone *
check_zone_free(SLGlobalData *slgd, SLZone *z)
{
        if (z->z_NFree == z->z_NMax &&
            (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
            z->z_RCount == 0
        ) {
                SLZone **pz;
                int *kup;

                for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
                        ;
                *pz = z->z_Next;
                z->z_Magic = -1;
                z->z_Next = slgd->FreeZones;
                slgd->FreeZones = z;
                ++slgd->NFreeZones;
                kup = btokup(z);
                *kup = 0;
                z = *pz;
        } else {
                z = z->z_Next;
        }
        return z;
}

#ifdef SLAB_DEBUG
/*
 * Used to debug memory corruption issues. Record up to (typically 32)
 * allocation sources for this zone (for a particular chunk size).
 */

static void
slab_record_source(SLZone *z, const char *file, int line)
{
        int i;
        int b = line & (SLAB_DEBUG_ENTRIES - 1);

        i = b;
        do {
                if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
                        return;
                if (z->z_Sources[i].file == NULL)
                        break;
                i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
        } while (i != b);
        z->z_Sources[i].file = file;
        z->z_Sources[i].line = line;
}

#endif

static __inline unsigned long
powerof2_size(unsigned long size)
{
        int i;

        if (size == 0 || powerof2(size))
                return size;

        i = flsl(size);
        return (1UL << i);
}
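
/*
 * powerof2_size() example: flsl(100) == 7 (the highest set bit of
 * 0b1100100), so a 100-byte request becomes 1UL << 7 == 128 bytes; zero
 * and exact powers of 2 are returned unchanged.
 */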

/*
 * kmalloc() (SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator. If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem. A SLAB tracking descriptor must be specified, use
 * &SlabMisc if you don't care.
 *
 * M_RNOWAIT - don't block.
 * M_NULLOK - return NULL instead of blocking.
 * M_ZERO - zero the returned memory.
 * M_USE_RESERVE - allow greater drawdown of the free list
 * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 * M_POWEROF2 - roundup size to the nearest power of 2
 *
 * MPSAFE
 */

#ifdef SLAB_DEBUG
void *
kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
              const char *file, int line)
#else
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
{
        SLZone *z;
        SLChunk *chunk;
        SLGlobalData *slgd;
        struct globaldata *gd;
        unsigned long align;
        int zi;
#ifdef INVARIANTS
        int i;
#endif

        logmemory_quick(malloc_beg);
        gd = mycpu;
        slgd = &gd->gd_slab;

        /*
         * XXX silly to have this in the critical path.
         */
        if (type->ks_limit == 0) {
                crit_enter();
                if (type->ks_limit == 0)
                        malloc_init(type);
                crit_exit();
        }
        ++type->ks_calls;

        if (flags & M_POWEROF2)
                size = powerof2_size(size);

        /*
         * Handle the case where the limit is reached. Panic if we can't
         * return NULL. The original malloc code looped, but this tended to
         * simply deadlock the computer.
         *
         * ks_loosememuse is an up-only limit that is NOT MP-synchronized,
         * used to determine if a more complete limit check should be done.
         * The actual memory use is tracked via ks_memuse[cpu].
         */
        while (type->ks_loosememuse >= type->ks_limit) {
                int i;
                long ttl;

                for (i = ttl = 0; i < ncpus; ++i)
                        ttl += type->ks_memuse[i];
                type->ks_loosememuse = ttl;     /* not MP synchronized */
                if ((ssize_t)ttl < 0)           /* deal with occasional race */
                        ttl = 0;
                if (ttl >= type->ks_limit) {
                        if (flags & M_NULLOK) {
                                logmemory(malloc_end, NULL, type, size, flags);
                                return(NULL);
                        }
                        panic("%s: malloc limit exceeded", type->ks_shortdesc);
                }
        }

        /*
         * Handle the degenerate size == 0 case. Yes, this does happen.
         * Return a special pointer. This is to maintain compatibility with
         * the original malloc implementation. Certain devices, such as the
         * adaptec driver, not only allocate 0 bytes, they check for NULL and
         * also realloc() later on. Joy.
         */
        if (size == 0) {
                logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
                return(ZERO_LENGTH_PTR);
        }

        /*
         * Handle hysteresis from prior frees here in malloc(). We cannot
         * safely manipulate the kernel_map in free() due to free() possibly
         * being called via an IPI message or from sensitive interrupt code.
         *
         * NOTE: ku_pagecnt must be cleared before we free the slab or we
         *       might race another cpu allocating the kva and setting
         *       ku_pagecnt.
         */
        while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
                crit_enter();
                if (slgd->NFreeZones > ZoneRelsThresh) {  /* crit sect race */
                        int *kup;

                        z = slgd->FreeZones;
                        slgd->FreeZones = z->z_Next;
                        --slgd->NFreeZones;
                        kup = btokup(z);
                        *kup = 0;
                        kmem_slab_free(z, ZoneSize);    /* may block */
                        atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024);
                }
                crit_exit();
        }

        /*
         * XXX handle oversized frees that were queued from kfree().
         */
        while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
                crit_enter();
                if ((z = slgd->FreeOvZones) != NULL) {
                        vm_size_t tsize;

                        KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
                        slgd->FreeOvZones = z->z_Next;
                        tsize = z->z_ChunkSize;
                        kmem_slab_free(z, tsize);       /* may block */
                        atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
                }
                crit_exit();
        }

        /*
         * Handle large allocations directly. There should not be very many
         * of these so performance is not a big issue.
         *
         * The backend allocator is pretty nasty on an SMP system. Use the
         * slab allocator for one and two page-sized chunks even though we
         * lose some efficiency. XXX maybe fix mmio and the elf loader
         * instead.
         */
        if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
                int *kup;

                size = round_page(size);
                chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
                if (chunk == NULL) {
                        logmemory(malloc_end, NULL, type, size, flags);
                        return(NULL);
                }
                atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
                flags &= ~M_ZERO;       /* result already zero'd if M_ZERO was set */
                flags |= M_PASSIVE_ZERO;
                kup = btokup(chunk);
                *kup = size / PAGE_SIZE;
                crit_enter();
                goto done;
        }

        /*
         * Attempt to allocate out of an existing zone. First try the free
         * list, then allocate out of unallocated space. If we find a good
         * zone move it to the head of the list so later allocations find it
         * quickly (we might have thousands of zones in the list).
         *
         * Note: zoneindex() will panic if size is too large.
         */
        zi = zoneindex(&size, &align);
        KKASSERT(zi < NZONES);
        crit_enter();

        if ((z = slgd->ZoneAry[zi]) != NULL) {
                /*
                 * Locate a chunk - we have to have at least one. If this is
                 * the last chunk go ahead and do the work to retrieve chunks
                 * freed from remote cpus, and if the zone is still empty
                 * move it off the ZoneAry.
                 */
                if (--z->z_NFree <= 0) {
                        KKASSERT(z->z_NFree == 0);

                        /*
                         * WARNING! This code competes with other cpus. It is
                         * ok for us to not drain RChunks here but we might
                         * as well, and it is ok if more accumulate after
                         * we're done.
                         *
                         * Set RSignal before pulling rchunks off, indicating
                         * that we will be moving ourselves off of the
                         * ZoneAry. Remote ends will read RSignal before
                         * putting rchunks on, thus interlocking their IPI
                         * signaling.
                         */
                        if (z->z_RChunks == NULL)
                                atomic_swap_int(&z->z_RSignal, 1);

                        clean_zone_rchunks(z);

                        /*
                         * Remove from the zone list if no free chunks remain.
                         * Clear RSignal.
                         */
                        if (z->z_NFree == 0) {
                                slgd->ZoneAry[zi] = z->z_Next;
                                z->z_Next = NULL;
                        } else {
                                z->z_RSignal = 0;
                        }
                }

                /*
                 * Fast path, we have chunks available in z_LChunks.
                 */
                chunk = z->z_LChunks;
                if (chunk) {
                        chunk_mark_allocated(z, chunk);
                        z->z_LChunks = chunk->c_Next;
                        if (z->z_LChunks == NULL)
                                z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
                        slab_record_source(z, file, line);
#endif
                        goto done;
                }

                /*
                 * No chunks are available in LChunks, the free chunk MUST be
                 * in the never-before-used memory area, controlled by UIndex.
                 *
                 * The consequences are very serious if our zone got corrupted
                 * so we use an explicit panic rather than a KASSERT.
                 */
                if (z->z_UIndex + 1 != z->z_NMax)
                        ++z->z_UIndex;
                else
                        z->z_UIndex = 0;

                if (z->z_UIndex == z->z_UEndIndex)
                        panic("slaballoc: corrupted zone");

                chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
                if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
                        flags &= ~M_ZERO;
                        flags |= M_PASSIVE_ZERO;
                }
                chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
                slab_record_source(z, file, line);
#endif
                goto done;
        }

        /*
         * If all zones are exhausted we need to allocate a new zone for this
         * index. Use M_ZERO to take advantage of pre-zeroed pages. Also see
         * the UAlloc use above with regard to M_ZERO. Note that when we are
         * reusing a zone from the FreeZones list UAlloc'd data will not be
         * zero'd, and we do not pre-zero it because we do not want to mess
         * up the L1 cache.
         *
         * At least one subsystem, the tty code (see CROUND), expects
         * power-of-2 allocations to be power-of-2 aligned. We maintain
         * compatibility by adjusting the base offset below.
         */
        {
                int off;
                int *kup;

                if ((z = slgd->FreeZones) != NULL) {
                        slgd->FreeZones = z->z_Next;
                        --slgd->NFreeZones;
                        bzero(z, sizeof(SLZone));
                        z->z_Flags |= SLZF_UNOTZEROD;
                } else {
                        z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
                        if (z == NULL)
                                goto fail;
                        atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024);
                }

                /*
                 * How big is the base structure?
                 */
#if defined(INVARIANTS)
                /*
                 * Make room for z_Bitmap. An exact calculation is somewhat
                 * more complicated, so use a conservative one here.
                 */
                off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
                bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
                off = sizeof(SLZone);
#endif

                /*
                 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
                 * Otherwise properly align the data according to the chunk
                 * size.
                 */
                if (powerof2(size))
                        align = size;
                off = (off + align - 1) & ~(align - 1);

                z->z_Magic = ZALLOC_SLAB_MAGIC;
                z->z_ZoneIndex = zi;
                z->z_NMax = (ZoneSize - off) / size;
                z->z_NFree = z->z_NMax - 1;
                z->z_BasePtr = (char *)z + off;
                z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
                z->z_ChunkSize = size;
                z->z_CpuGd = gd;
                z->z_Cpu = gd->gd_cpuid;
                z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
                bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
                bzero(z->z_Sources, sizeof(z->z_Sources));
#endif
                chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
                z->z_Next = slgd->ZoneAry[zi];
                slgd->ZoneAry[zi] = z;
                if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
                        flags &= ~M_ZERO;       /* already zero'd */
                        flags |= M_PASSIVE_ZERO;
                }
                kup = btokup(z);
                *kup = -(z->z_Cpu + 1); /* -1 to -(N+1) */
                chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
                slab_record_source(z, file, line);
#endif

                /*
                 * Slide the base index for initial allocations out of the
                 * next zone we create so we do not over-weight the lower
                 * part of the cpu memory caches.
                 */
                slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                                  & (ZALLOC_MAX_ZONE_SIZE - 1);
        }

done:
        ++type->ks_inuse[gd->gd_cpuid];
        type->ks_memuse[gd->gd_cpuid] += size;
        type->ks_loosememuse += size;   /* not MP synchronized */
        crit_exit();

        if (flags & M_ZERO)
                bzero(chunk, size);
#ifdef INVARIANTS
        else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
                if (use_malloc_pattern) {
                        for (i = 0; i < size; i += sizeof(int)) {
                                *(int *)((char *)chunk + i) = -1;
                        }
                }
                chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
        }
#endif
        logmemory(malloc_end, chunk, type, size, flags);
        return(chunk);
fail:
        crit_exit();
        logmemory(malloc_end, NULL, type, size, flags);
        return(NULL);
}

/*
 * kernel realloc. (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
#ifdef SLAB_DEBUG
void *
krealloc_debug(void *ptr, unsigned long size,
               struct malloc_type *type, int flags,
               const char *file, int line)
#else
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
#endif
{
        unsigned long osize;
        unsigned long align;
        SLZone *z;
        void *nptr;
        int *kup;

        KKASSERT((flags & M_ZERO) == 0);        /* not supported */

        if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
                return(kmalloc_debug(size, type, flags, file, line));
        if (size == 0) {
                kfree(ptr, type);
                return(NULL);
        }

        /*
         * Handle oversized allocations. XXX we really should require that a
         * size be passed to free() instead of this nonsense.
         */
        kup = btokup(ptr);
        if (*kup > 0) {
                osize = *kup << PAGE_SHIFT;
                if (osize == round_page(size))
                        return(ptr);
                if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
                        return(NULL);
                bcopy(ptr, nptr, min(size, osize));
                kfree(ptr, type);
                return(nptr);
        }

        /*
         * Get the original allocation's zone. If the new request winds up
         * using the same chunk size we do not have to do anything.
         */
        z = (SLZone *)((uintptr_t)ptr & ZoneMask);
        kup = btokup(z);
        KKASSERT(*kup < 0);
        KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

        /*
         * Allocate memory for the new request size. Note that zoneindex has
         * already adjusted the request size to the appropriate chunk size,
         * which should optimize our bcopy(). Then copy and return the new
         * pointer.
         *
         * Resizing a non-power-of-2 allocation to a power-of-2 size does not
         * necessarily align the result.
         *
         * We can only zoneindex (to align size to the chunk size) if the new
         * size is not too large.
         */
        if (size < ZoneLimit) {
                zoneindex(&size, &align);
                if (z->z_ChunkSize == size)
                        return(ptr);
        }
        if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
                return(NULL);
        bcopy(ptr, nptr, min(size, z->z_ChunkSize));
        kfree(ptr, type);
        return(nptr);
}
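
/*
 * Example of the reuse path above: a 100-byte allocation lives in the
 * 104-byte chunk zone, so krealloc() to any size in the 97-104 range
 * rounds to the same chunk size and returns the original pointer with
 * no copy.
 */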

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
        if (type->ks_limit == 0) {
                crit_enter();
                if (type->ks_limit == 0)
                        malloc_init(type);
                crit_exit();
        }
        return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
#ifdef SLAB_DEBUG
char *
kstrdup_debug(const char *str, struct malloc_type *type,
              const char *file, int line)
#else
char *
kstrdup(const char *str, struct malloc_type *type)
#endif
{
        int zlen;       /* length inclusive of terminating NUL */
        char *nstr;

        if (str == NULL)
                return(NULL);
        zlen = strlen(str) + 1;
        nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
        bcopy(str, nstr, zlen);
        return(nstr);
}

/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own. RCount will be bumped so the memory should be good, but validate
 * that it really is.
 */
static
void
kfree_remote(void *ptr)
{
        SLGlobalData *slgd;
        SLZone *z;
        int nfree;
        int *kup;

        slgd = &mycpu->gd_slab;
        z = ptr;
        kup = btokup(z);
        KKASSERT(*kup == -((int)mycpuid + 1));
        KKASSERT(z->z_RCount > 0);
        atomic_subtract_int(&z->z_RCount, 1);

        logmemory(free_rem_beg, z, NULL, 0L, 0);
        KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
        KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
        nfree = z->z_NFree;

        /*
         * Indicate that we will no longer be off of the ZoneAry by
         * clearing RSignal.
         */
        if (z->z_RChunks)
                z->z_RSignal = 0;

        /*
         * Atomically extract the bchunks list and then process it back
         * into the lchunks list. We want to append our bchunks to the
         * lchunks list and not prepend since we likely do not have
         * cache mastership of the related data (not that it helps since
         * we are using c_Next).
         */
        clean_zone_rchunks(z);
        if (z->z_NFree && nfree == 0) {
                z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
                slgd->ZoneAry[z->z_ZoneIndex] = z;
        }

        /*
         * If the zone becomes totally free, and there are other zones we
         * can allocate from, move this zone to the FreeZones list. Since
         * this code can be called from an IPI callback, do *NOT* try to mess
         * with kernel_map here. Hysteresis will be performed at malloc()
         * time.
         *
         * Do not move the zone if there is an IPI in flight, otherwise MP
         * races can result in our free_remote code accessing a destroyed
         * zone.
         */
        if (z->z_NFree == z->z_NMax &&
            (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
            z->z_RCount == 0
        ) {
                SLZone **pz;
                int *kup;

                for (pz = &slgd->ZoneAry[z->z_ZoneIndex];
                     z != *pz;
                     pz = &(*pz)->z_Next) {
                        ;
                }
                *pz = z->z_Next;
                z->z_Magic = -1;
                z->z_Next = slgd->FreeZones;
                slgd->FreeZones = z;
                ++slgd->NFreeZones;
                kup = btokup(z);
                *kup = 0;
        }
        logmemory(free_rem_end, z, NULL, 0L, 0);
}

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc. Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
        SLZone *z;
        SLChunk *chunk;
        SLGlobalData *slgd;
        struct globaldata *gd;
        int *kup;
        unsigned long size;
        SLChunk *bchunk;
        int rsignal;

        logmemory_quick(free_beg);
        gd = mycpu;
        slgd = &gd->gd_slab;

        if (ptr == NULL)
                panic("trying to free NULL pointer");

        /*
         * Handle special 0-byte allocations
         */
        if (ptr == ZERO_LENGTH_PTR) {
                logmemory(free_zero, ptr, type, -1UL, 0);
                logmemory_quick(free_end);
                return;
        }

        /*
         * Panic on bad malloc type
         */
        if (type->ks_magic != M_MAGIC)
                panic("free: malloc type lacks magic");

        /*
         * Handle oversized allocations. XXX we really should require that a
         * size be passed to free() instead of this nonsense.
         *
         * This code is never called via an ipi.
         */
        kup = btokup(ptr);
        if (*kup > 0) {
                size = *kup << PAGE_SHIFT;
                *kup = 0;
#ifdef INVARIANTS
                KKASSERT(sizeof(weirdary) <= size);
                bcopy(weirdary, ptr, sizeof(weirdary));
#endif
                /*
                 * NOTE: For oversized allocations we do not record the
                 *       originating cpu. It gets freed on the cpu calling
                 *       kfree(). The statistics are in aggregate.
                 *
                 * note: XXX we have still inherited the interrupts-can't-block
                 *       assumption. An interrupt thread does not bump
                 *       gd_intr_nesting_level so check TDF_INTTHREAD. This is
                 *       primarily until we can fix softupdate's assumptions
                 *       about free().
                 */
                crit_enter();
                --type->ks_inuse[gd->gd_cpuid];
                type->ks_memuse[gd->gd_cpuid] -= size;
                if (mycpu->gd_intr_nesting_level ||
                    (gd->gd_curthread->td_flags & TDF_INTTHREAD))
                {
                        logmemory(free_ovsz_delayed, ptr, type, size, 0);
                        z = (SLZone *)ptr;
                        z->z_Magic = ZALLOC_OVSZ_MAGIC;
                        z->z_Next = slgd->FreeOvZones;
                        z->z_ChunkSize = size;
                        slgd->FreeOvZones = z;
                        crit_exit();
                } else {
                        crit_exit();
                        logmemory(free_ovsz, ptr, type, size, 0);
                        kmem_slab_free(ptr, size);      /* may block */
                        atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
                }
                logmemory_quick(free_end);
                return;
        }

        /*
         * Zone case. Figure out the zone based on the fact that it is
         * ZoneSize aligned.
         */
        z = (SLZone *)((uintptr_t)ptr & ZoneMask);
        kup = btokup(z);
        KKASSERT(*kup < 0);
        KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

        /*
         * If we do not own the zone then use atomic ops to free to the
         * remote cpu linked list and notify the target zone using a
         * passive message.
         *
         * The target zone cannot be deallocated while we own a chunk of it,
         * so the zone header's storage is stable until the very moment
         * we adjust z_RChunks. After that we cannot safely dereference (z).
         *
         * (no critical section needed)
         */
        if (z->z_CpuGd != gd) {
                /*
                 * Making these adjustments now allows us to avoid passing
                 * (type) to the remote cpu. Note that ks_inuse/ks_memuse is
                 * being adjusted on OUR cpu, not the zone cpu, but it should
                 * all still sum up properly and cancel out.
                 */
                crit_enter();
                --type->ks_inuse[gd->gd_cpuid];
                type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
                crit_exit();

                /*
                 * WARNING! This code competes with other cpus. Once we
                 *          successfully link the chunk to RChunks the remote
                 *          cpu can rip z's storage out from under us.
                 *
                 *          Bumping RCount prevents z's storage from getting
                 *          ripped out.
                 */
                rsignal = z->z_RSignal;
                cpu_lfence();
                if (rsignal)
                        atomic_add_int(&z->z_RCount, 1);

                chunk = ptr;
                for (;;) {
                        bchunk = z->z_RChunks;
                        cpu_ccfence();
                        chunk->c_Next = bchunk;
                        cpu_sfence();

                        if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
                                break;
                }

                /*
                 * We have to signal the remote cpu if our actions will cause
                 * the remote zone to be placed back on ZoneAry so it can
                 * move the zone back on.
                 *
                 * We only need to deal with NULL->non-NULL RChunk transitions
                 * and only if z_RSignal is set. We interlock by reading
                 * rsignal before adding our chunk to RChunks. This should
                 * result in virtually no IPI traffic.
                 *
                 * We can use a passive IPI to reduce overhead even further.
                 */
                if (bchunk == NULL && rsignal) {
                        logmemory(free_request, ptr, type,
                                  (unsigned long)z->z_ChunkSize, 0);
                        lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
                        /* z can get ripped out from under us from this point on */
                } else if (rsignal) {
                        atomic_subtract_int(&z->z_RCount, 1);
                        /* z can get ripped out from under us from this point on */
                }
                logmemory_quick(free_end);
                return;
        }

        /*
         * kfree locally
         */
        logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);

        crit_enter();
        chunk = ptr;
        chunk_mark_free(z, chunk);

        /*
         * Put weird data into the memory to detect modifications after
         * freeing, illegal pointer use after freeing (we should fault on
         * the odd address), and so forth. XXX needs more work, see the
         * old malloc code.
         */
#ifdef INVARIANTS
        if (z->z_ChunkSize < sizeof(weirdary))
                bcopy(weirdary, chunk, z->z_ChunkSize);
        else
                bcopy(weirdary, chunk, sizeof(weirdary));
#endif

        /*
         * Add this free non-zero'd chunk to a linked list for reuse. Add
         * to the front of the linked list so it is more likely to be
         * reallocated, since it is already in our L1 cache.
         */
#ifdef INVARIANTS
        if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
                panic("BADFREE %p", chunk);
#endif
        chunk->c_Next = z->z_LChunks;
        z->z_LChunks = chunk;
        if (chunk->c_Next == NULL)
                z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
        if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
                panic("BADFREE2");
#endif

        /*
         * Bump the number of free chunks. If it becomes non-zero the zone
         * must be added back onto the appropriate list.
         */
        if (z->z_NFree++ == 0) {
                z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
                slgd->ZoneAry[z->z_ZoneIndex] = z;
        }

        --type->ks_inuse[z->z_Cpu];
        type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

        check_zone_free(slgd, z);
        logmemory_quick(free_end);
        crit_exit();
}

/*
 * Cleanup slabs which are hanging around due to RChunks. Called once every
 * 10 seconds on all cpus.
 */
void
slab_cleanup(void)
{
        SLGlobalData *slgd = &mycpu->gd_slab;
        SLZone *z;
        int i;

        crit_enter();
        for (i = 0; i < NZONES; ++i) {
                if ((z = slgd->ZoneAry[i]) == NULL)
                        continue;
                z = z->z_Next;

                /*
                 * Scan zones starting with the second zone in each list.
                 */
                while (z) {
                        /*
                         * Splice all RChunks onto the end of the LChunks
                         * list; the cost is bounded by the number of chunks
                         * moved.
                         */
                        clean_zone_rchunks(z);
                        z = check_zone_free(slgd, z);
                }
        }
        crit_exit();
}

#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
        int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
        __uint32_t *bitptr;

        KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
        KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
                ("memory chunk %p bit index %d is illegal", chunk, bitdex));
        bitptr = &z->z_Bitmap[bitdex >> 5];
        bitdex &= 31;
        KASSERT((*bitptr & (1 << bitdex)) == 0,
                ("memory chunk %p is already allocated!", chunk));
        *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
        int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
        __uint32_t *bitptr;

        KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
        KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
                ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
        bitptr = &z->z_Bitmap[bitdex >> 5];
        bitdex &= 31;
        KASSERT((*bitptr & (1 << bitdex)) != 0,
                ("memory chunk %p is already free!", chunk));
        *bitptr &= ~(1 << bitdex);
}
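
/*
 * Bitmap indexing example: the chunk with bitdex 40 lives in
 * z_Bitmap[40 >> 5] == z_Bitmap[1], at bit position 40 & 31 == 8.
 */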

#endif

/*
 * kmem_slab_alloc()
 *
 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 * specified alignment. M_* flags are expected in the flags field.
 *
 * Alignment must be a multiple of PAGE_SIZE.
 *
 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *       but when we move zalloc() over to use this function as its backend
 *       we will have to switch to kreserve/krelease and call reserve(0)
 *       after the new space is made available.
 *
 * Interrupt code which has preempted other code is not allowed to
 * use PQ_CACHE pages. However, if an interrupt thread is run
 * non-preemptively or blocks and then runs non-preemptively, then
 * it is free to use PQ_CACHE pages. <--- may not apply any longer XXX
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
        vm_size_t i;
        vm_offset_t addr;
        int count, vmflags, base_vmflags;
        vm_page_t mbase = NULL;
        vm_page_t m;
        thread_t td;

        size = round_page(size);
        addr = vm_map_min(&kernel_map);

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        crit_enter();
        vm_map_lock(&kernel_map);
        if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
                vm_map_unlock(&kernel_map);
                if ((flags & M_NULLOK) == 0)
                        panic("kmem_slab_alloc(): kernel_map ran out of space!");
                vm_map_entry_release(count);
                crit_exit();
                return(NULL);
        }

        /*
         * kernel_object maps 1:1 to kernel_map.
         */
        vm_object_hold(&kernel_object);
        vm_object_reference_locked(&kernel_object);
        vm_map_insert(&kernel_map, &count,
                      &kernel_object, addr, addr, addr + size,
                      VM_MAPTYPE_NORMAL,
                      VM_PROT_ALL, VM_PROT_ALL,
                      0);
        vm_object_drop(&kernel_object);
        vm_map_set_wired_quick(&kernel_map, addr, size, &count);
        vm_map_unlock(&kernel_map);

        td = curthread;

        base_vmflags = 0;
        if (flags & M_ZERO)
                base_vmflags |= VM_ALLOC_ZERO;
        if (flags & M_USE_RESERVE)
                base_vmflags |= VM_ALLOC_SYSTEM;
        if (flags & M_USE_INTERRUPT_RESERVE)
                base_vmflags |= VM_ALLOC_INTERRUPT;
        if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
                panic("kmem_slab_alloc: bad flags %08x (%p)",
                      flags, ((int **)&size)[-1]);
        }

        /*
         * Allocate the pages. Do not mess with the PG_ZERO flag or map
         * them yet. VM_ALLOC_NORMAL can only be set if we are not preempting.
         *
         * VM_ALLOC_SYSTEM is automatically set if we are preempting and
         * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
         * implied in this case), though I'm not sure if we really need to
         * do that.
         */
        vmflags = base_vmflags;
        if (flags & M_WAITOK) {
                if (td->td_preempted)
                        vmflags |= VM_ALLOC_SYSTEM;
                else
                        vmflags |= VM_ALLOC_NORMAL;
        }

        vm_object_hold(&kernel_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
                if (i == 0)
                        mbase = m;

                /*
                 * If the allocation failed we either return NULL or we retry.
                 *
                 * If M_WAITOK is specified we wait for more memory and retry.
                 * If M_WAITOK is specified from a preemption we yield instead
                 * of wait. Livelock will not occur because the interrupt
                 * thread will not be preempting anyone the second time around
                 * after the yield.
                 */
                if (m == NULL) {
                        if (flags & M_WAITOK) {
                                if (td->td_preempted) {
                                        lwkt_switch();
                                } else {
                                        vm_wait(0);
                                }
                                i -= PAGE_SIZE; /* retry */
                                continue;
                        }
                        break;
                }
        }

        /*
         * Check and deal with an allocation failure
         */
        if (i != size) {
                while (i != 0) {
                        i -= PAGE_SIZE;
                        m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
                        /* page should already be busy */
                        vm_page_free(m);
                }
                vm_map_lock(&kernel_map);
                vm_map_delete(&kernel_map, addr, addr + size, &count);
                vm_map_unlock(&kernel_map);
                vm_object_drop(&kernel_object);

                vm_map_entry_release(count);
                crit_exit();
                return(NULL);
        }

        /*
         * Success!
         *
         * NOTE: The VM pages are still busied. mbase points to the first one
         *       but we have to iterate via vm_page_next()
         */
        vm_object_drop(&kernel_object);
        crit_exit();

        /*
         * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
         */
        m = mbase;
        i = 0;

        while (i < size) {
                /*
                 * page should already be busy
                 */
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_wire(m);
                pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL | VM_PROT_NOSYNC,
                           1, NULL);
                if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
                        bzero((char *)addr + i, PAGE_SIZE);
                vm_page_flag_clear(m, PG_ZERO);
                KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
                vm_page_flag_set(m, PG_REFERENCED);
                vm_page_wakeup(m);

                i += PAGE_SIZE;
                vm_object_hold(&kernel_object);
                m = vm_page_next(m);
                vm_object_drop(&kernel_object);
        }
        smp_invltlb();
        vm_map_entry_release(count);
        atomic_add_long(&SlabsAllocated, 1);
        return((void *)addr);
}

/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
        crit_enter();
        vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
        atomic_add_long(&SlabsFreed, 1);
        crit_exit();
}

void *
kmalloc_cachealign(unsigned long size_alloc, struct malloc_type *type,
                   int flags)
{
#if (__VM_CACHELINE_SIZE == 32)
#define CAN_CACHEALIGN(sz)      ((sz) >= 256)
#elif (__VM_CACHELINE_SIZE == 64)
#define CAN_CACHEALIGN(sz)      ((sz) >= 512)
#elif (__VM_CACHELINE_SIZE == 128)
#define CAN_CACHEALIGN(sz)      ((sz) >= 1024)
#else
#error "unsupported cacheline size"
#endif

        void *ret;

        if (size_alloc < __VM_CACHELINE_SIZE)
                size_alloc = __VM_CACHELINE_SIZE;
        else if (!CAN_CACHEALIGN(size_alloc))
                flags |= M_POWEROF2;

        ret = kmalloc(size_alloc, type, flags);
        KASSERT(((uintptr_t)ret & (__VM_CACHELINE_SIZE - 1)) == 0,
                ("%p(%lu) not cacheline %d aligned",
                 ret, size_alloc, __VM_CACHELINE_SIZE));
        return ret;

#undef CAN_CACHEALIGN
}
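
/*
 * kmalloc_cachealign() usage note (a sketch for a 64-byte cacheline
 * machine): a 200-byte request is below the CAN_CACHEALIGN threshold, so
 * M_POWEROF2 bumps it to 256 and the allocator's power-of-2 alignment
 * rule then guarantees cacheline alignment; requests under 64 bytes are
 * first padded up to a full cacheline.
 */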