FreeBSD/Linux Kernel Cross Reference
sys/vm/uma_core.c
1 /*-
2 * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
3 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4 * Copyright (c) 2004-2006 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * uma_core.c Implementation of the Universal Memory allocator
31 *
32 * This allocator is intended to replace the multitude of similar object caches
33 * in the standard FreeBSD kernel. The intent is to be flexible as well as
34  * efficient. A primary design goal is to return unused memory to the rest of
35 * the system. This will make the system as a whole more flexible due to the
36 * ability to move memory to subsystems which most need it instead of leaving
37 * pools of reserved memory unused.
38 *
39 * The basic ideas stem from similar slab/zone based allocators whose algorithms
40 * are well known.
41 *
42 */
43
44 /*
45 * TODO:
46 * - Improve memory usage for large allocations
47 * - Investigate cache size adjustments
48 */
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD: releng/9.2/sys/vm/uma_core.c 242365 2012-10-30 17:05:12Z mdf $");
52
53 /* I should really use ktr.. */
54 /*
55 #define UMA_DEBUG 1
56 #define UMA_DEBUG_ALLOC 1
57 #define UMA_DEBUG_ALLOC_1 1
58 */
59
60 #include "opt_ddb.h"
61 #include "opt_param.h"
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/types.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/sbuf.h>
75 #include <sys/smp.h>
76 #include <sys/vmmeter.h>
77
78 #include <vm/vm.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_param.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_kern.h>
84 #include <vm/vm_extern.h>
85 #include <vm/uma.h>
86 #include <vm/uma_int.h>
87 #include <vm/uma_dbg.h>
88
89 #include <ddb/ddb.h>
90
91 /*
92 * This is the zone and keg from which all zones are spawned. The idea is that
93 * even the zone & keg heads are allocated from the allocator, so we use the
94 * bss section to bootstrap us.
95 */
96 static struct uma_keg masterkeg;
97 static struct uma_zone masterzone_k;
98 static struct uma_zone masterzone_z;
99 static uma_zone_t kegs = &masterzone_k;
100 static uma_zone_t zones = &masterzone_z;
101
102 /* This is the zone from which all of uma_slab_t's are allocated. */
103 static uma_zone_t slabzone;
104 static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */
105
106 /*
107 * The initial hash tables come out of this zone so they can be allocated
108 * prior to malloc coming up.
109 */
110 static uma_zone_t hashzone;
111
112 /* The boot-time adjusted value for cache line alignment. */
113 int uma_align_cache = 64 - 1;
114
115 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
116
117 /*
118 * Are we allowed to allocate buckets?
119 */
120 static int bucketdisable = 1;
121
122 /* Linked list of all kegs in the system */
123 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
124
125 /* This mutex protects the keg list */
126 static struct mtx uma_mtx;
127
128 /* Linked list of boot time pages */
129 static LIST_HEAD(,uma_slab) uma_boot_pages =
130 LIST_HEAD_INITIALIZER(uma_boot_pages);
131
132 /* This mutex protects the boot time pages list */
133 static struct mtx uma_boot_pages_mtx;
134
135 /* Is the VM done starting up? */
136 static int booted = 0;
137 #define UMA_STARTUP 1
138 #define UMA_STARTUP2 2
139
140 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
141 static u_int uma_max_ipers;
142 static u_int uma_max_ipers_ref;
143
144 /*
145 * This is the handle used to schedule events that need to happen
146 * outside of the allocation fast path.
147 */
148 static struct callout uma_callout;
149 #define UMA_TIMEOUT 20 /* Seconds for callout interval. */
150
151 /*
152 * This structure is passed as the zone ctor arg so that I don't have to create
153 * a special allocation function just for zones.
154 */
155 struct uma_zctor_args {
156 const char *name;
157 size_t size;
158 uma_ctor ctor;
159 uma_dtor dtor;
160 uma_init uminit;
161 uma_fini fini;
162 uma_keg_t keg;
163 int align;
164 u_int32_t flags;
165 };
166
167 struct uma_kctor_args {
168 uma_zone_t zone;
169 size_t size;
170 uma_init uminit;
171 uma_fini fini;
172 int align;
173 u_int32_t flags;
174 };
175
176 struct uma_bucket_zone {
177 uma_zone_t ubz_zone;
178 char *ubz_name;
179 int ubz_entries;
180 };
181
182 #define BUCKET_MAX 128
183
184 struct uma_bucket_zone bucket_zones[] = {
185 { NULL, "16 Bucket", 16 },
186 { NULL, "32 Bucket", 32 },
187 { NULL, "64 Bucket", 64 },
188 { NULL, "128 Bucket", 128 },
189 { NULL, NULL, 0}
190 };
191
192 #define BUCKET_SHIFT 4
193 #define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1)
194
195 /*
196 * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
197 * of approximately the right size.
198 */
199 static uint8_t bucket_size[BUCKET_ZONES];
200
201 /*
202 * Flags and enumerations to be passed to internal functions.
203 */
204 enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
205
206 #define ZFREE_STATFAIL 0x00000001 /* Update zone failure statistic. */
207 #define ZFREE_STATFREE 0x00000002 /* Update zone free statistic. */
208
209 /* Prototypes.. */
210
211 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
212 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
213 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
214 static void page_free(void *, int, u_int8_t);
215 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
216 static void cache_drain(uma_zone_t);
217 static void bucket_drain(uma_zone_t, uma_bucket_t);
218 static void bucket_cache_drain(uma_zone_t zone);
219 static int keg_ctor(void *, int, void *, int);
220 static void keg_dtor(void *, int, void *);
221 static int zone_ctor(void *, int, void *, int);
222 static void zone_dtor(void *, int, void *);
223 static int zero_init(void *, int, int);
224 static void keg_small_init(uma_keg_t keg);
225 static void keg_large_init(uma_keg_t keg);
226 static void zone_foreach(void (*zfunc)(uma_zone_t));
227 static void zone_timeout(uma_zone_t zone);
228 static int hash_alloc(struct uma_hash *);
229 static int hash_expand(struct uma_hash *, struct uma_hash *);
230 static void hash_free(struct uma_hash *hash);
231 static void uma_timeout(void *);
232 static void uma_startup3(void);
233 static void *zone_alloc_item(uma_zone_t, void *, int);
234 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
235 int);
236 static void bucket_enable(void);
237 static void bucket_init(void);
238 static uma_bucket_t bucket_alloc(int, int);
239 static void bucket_free(uma_bucket_t);
240 static void bucket_zone_drain(void);
241 static int zone_alloc_bucket(uma_zone_t zone, int flags);
242 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
243 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
244 static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
245 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
246 uma_fini fini, int align, u_int32_t flags);
247 static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
248 static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
249
250 void uma_print_zone(uma_zone_t);
251 void uma_print_stats(void);
252 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
253 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
254
255 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
256
257 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
258 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
259
260 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
261 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
262
263 /*
264 * This routine checks to see whether or not it's safe to enable buckets.
265 */
266
267 static void
268 bucket_enable(void)
269 {
270 bucketdisable = vm_page_count_min();
271 }
272
273 /*
274 * Initialize bucket_zones, the array of zones of buckets of various sizes.
275 *
276 * For each zone, calculate the memory required for each bucket, consisting
277  * of the header and an array of pointers. Initialize bucket_size[] so
278  * that each bucket size in the appropriate range maps to the zone.
279 */
280 static void
281 bucket_init(void)
282 {
283 struct uma_bucket_zone *ubz;
284 int i;
285 int j;
286
287 for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
288 int size;
289
290 ubz = &bucket_zones[j];
291 size = roundup(sizeof(struct uma_bucket), sizeof(void *));
292 size += sizeof(void *) * ubz->ubz_entries;
293 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
294 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
295 UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET);
296 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
297 bucket_size[i >> BUCKET_SHIFT] = j;
298 }
299 }
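/*
 * A worked trace of the loop above (illustrative, assuming the four
 * bucket_zones[] entries defined earlier and BUCKET_SHIFT == 4):
 *
 *	bucket_size[0..1] = 0	("16 Bucket",  entries 0..16)
 *	bucket_size[2]    = 1	("32 Bucket",  entries 17..32)
 *	bucket_size[3..4] = 2	("64 Bucket",  entries 33..64)
 *	bucket_size[5..8] = 3	("128 Bucket", entries 65..128)
 */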
300
301 /*
302 * Given a desired number of entries for a bucket, return the zone from which
303 * to allocate the bucket.
304 */
305 static struct uma_bucket_zone *
306 bucket_zone_lookup(int entries)
307 {
308 int idx;
309
310 idx = howmany(entries, 1 << BUCKET_SHIFT);
311 return (&bucket_zones[bucket_size[idx]]);
312 }
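/*
 * A sketch of a lookup against the table above (illustrative only): a
 * request for a 24-entry bucket computes idx = howmany(24, 16) = 2, and
 * bucket_size[2] == 1 selects the "32 Bucket" zone.
 */
#if 0
	struct uma_bucket_zone *ubz;

	ubz = bucket_zone_lookup(24);		/* &bucket_zones[1] */
#endif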
313
314 static uma_bucket_t
315 bucket_alloc(int entries, int bflags)
316 {
317 struct uma_bucket_zone *ubz;
318 uma_bucket_t bucket;
319
320 /*
321 * This is to stop us from allocating per cpu buckets while we're
322 * running out of vm.boot_pages. Otherwise, we would exhaust the
323 * boot pages. This also prevents us from allocating buckets in
324 * low memory situations.
325 */
326 if (bucketdisable)
327 return (NULL);
328
329 ubz = bucket_zone_lookup(entries);
330 bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
331 if (bucket) {
332 #ifdef INVARIANTS
333 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
334 #endif
335 bucket->ub_cnt = 0;
336 bucket->ub_entries = ubz->ubz_entries;
337 }
338
339 return (bucket);
340 }
341
342 static void
343 bucket_free(uma_bucket_t bucket)
344 {
345 struct uma_bucket_zone *ubz;
346
347 ubz = bucket_zone_lookup(bucket->ub_entries);
348 zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
349 ZFREE_STATFREE);
350 }
351
352 static void
353 bucket_zone_drain(void)
354 {
355 struct uma_bucket_zone *ubz;
356
357 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
358 zone_drain(ubz->ubz_zone);
359 }
360
361 static inline uma_keg_t
362 zone_first_keg(uma_zone_t zone)
363 {
364
365 return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
366 }
367
368 static void
369 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
370 {
371 uma_klink_t klink;
372
373 LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
374 kegfn(klink->kl_keg);
375 }
376
377 /*
378  * Routine called from the callout to fire off time-interval based
379  * calculations (stats, hash size, etc.).
380 *
381 * Arguments:
382 * arg Unused
383 *
384 * Returns:
385 * Nothing
386 */
387 static void
388 uma_timeout(void *unused)
389 {
390 bucket_enable();
391 zone_foreach(zone_timeout);
392
393 /* Reschedule this event */
394 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
395 }
396
397 /*
398  * Routine to perform timeout driven calculations. Currently this just
399  * expands the keg hash tables when the slab count outgrows them.
400 *
401 * Returns nothing.
402 */
403 static void
404 keg_timeout(uma_keg_t keg)
405 {
406
407 KEG_LOCK(keg);
408 /*
409 * Expand the keg hash table.
410 *
411 * This is done if the number of slabs is larger than the hash size.
412  * What I'm trying to do here is eliminate collisions entirely. This
413 * may be a little aggressive. Should I allow for two collisions max?
414 */
415 if (keg->uk_flags & UMA_ZONE_HASH &&
416 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
417 struct uma_hash newhash;
418 struct uma_hash oldhash;
419 int ret;
420
421 /*
422 * This is so involved because allocating and freeing
423 * while the keg lock is held will lead to deadlock.
424 * I have to do everything in stages and check for
425 * races.
426 */
427 newhash = keg->uk_hash;
428 KEG_UNLOCK(keg);
429 ret = hash_alloc(&newhash);
430 KEG_LOCK(keg);
431 if (ret) {
432 if (hash_expand(&keg->uk_hash, &newhash)) {
433 oldhash = keg->uk_hash;
434 keg->uk_hash = newhash;
435 } else
436 oldhash = newhash;
437
438 KEG_UNLOCK(keg);
439 hash_free(&oldhash);
440 KEG_LOCK(keg);
441 }
442 }
443 KEG_UNLOCK(keg);
444 }
445
446 static void
447 zone_timeout(uma_zone_t zone)
448 {
449
450 zone_foreach_keg(zone, &keg_timeout);
451 }
452
453 /*
454 * Allocate and zero fill the next sized hash table from the appropriate
455 * backing store.
456 *
457 * Arguments:
458 * hash A new hash structure with the old hash size in uh_hashsize
459 *
460 * Returns:
461  * 1 on success and 0 on failure.
462 */
463 static int
464 hash_alloc(struct uma_hash *hash)
465 {
466 int oldsize;
467 int alloc;
468
469 oldsize = hash->uh_hashsize;
470
471 /* We're just going to go to a power of two greater */
472 if (oldsize) {
473 hash->uh_hashsize = oldsize * 2;
474 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
475 hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
476 M_UMAHASH, M_NOWAIT);
477 } else {
478 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
479 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
480 M_WAITOK);
481 hash->uh_hashsize = UMA_HASH_SIZE_INIT;
482 }
483 if (hash->uh_slab_hash) {
484 bzero(hash->uh_slab_hash, alloc);
485 hash->uh_hashmask = hash->uh_hashsize - 1;
486 return (1);
487 }
488
489 return (0);
490 }
491
492 /*
493 * Expands the hash table for HASH zones. This is done from zone_timeout
494 * to reduce collisions. This must not be done in the regular allocation
495 * path, otherwise, we can recurse on the vm while allocating pages.
496 *
497 * Arguments:
498 * oldhash The hash you want to expand
499 * newhash The hash structure for the new table
500 *
501  * Returns:
502  * 1 on success, 0 on failure.
505  */
506 static int
507 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
508 {
509 uma_slab_t slab;
510 int hval;
511 int i;
512
513 if (!newhash->uh_slab_hash)
514 return (0);
515
516 if (oldhash->uh_hashsize >= newhash->uh_hashsize)
517 return (0);
518
519 /*
520 * I need to investigate hash algorithms for resizing without a
521 * full rehash.
522 */
523
524 for (i = 0; i < oldhash->uh_hashsize; i++)
525 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
526 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
527 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
528 hval = UMA_HASH(newhash, slab->us_data);
529 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
530 slab, us_hlink);
531 }
532
533 return (1);
534 }
535
536 /*
537 * Free the hash bucket to the appropriate backing store.
538 *
539 * Arguments:
540  * hash The hash structure whose slab bucket array we're freeing
542 *
543 * Returns:
544 * Nothing
545 */
546 static void
547 hash_free(struct uma_hash *hash)
548 {
549 if (hash->uh_slab_hash == NULL)
550 return;
551 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
552 zone_free_item(hashzone,
553 hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
554 else
555 free(hash->uh_slab_hash, M_UMAHASH);
556 }
557
558 /*
559 * Frees all outstanding items in a bucket
560 *
561 * Arguments:
562 * zone The zone to free to, must be unlocked.
563 * bucket The free/alloc bucket with items, cpu queue must be locked.
564 *
565 * Returns:
566 * Nothing
567 */
568
569 static void
570 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
571 {
572 void *item;
573
574 if (bucket == NULL)
575 return;
576
577 while (bucket->ub_cnt > 0) {
578 bucket->ub_cnt--;
579 item = bucket->ub_bucket[bucket->ub_cnt];
580 #ifdef INVARIANTS
581 bucket->ub_bucket[bucket->ub_cnt] = NULL;
582 KASSERT(item != NULL,
583 ("bucket_drain: botched ptr, item is NULL"));
584 #endif
585 zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
586 }
587 }
588
589 /*
590 * Drains the per cpu caches for a zone.
591 *
592  * NOTE: This may only be called while the zone is being torn down, and not
593  * during normal operation. This is necessary so that we do not have
594 * to migrate CPUs to drain the per-CPU caches.
595 *
596 * Arguments:
597 * zone The zone to drain, must be unlocked.
598 *
599 * Returns:
600 * Nothing
601 */
602 static void
603 cache_drain(uma_zone_t zone)
604 {
605 uma_cache_t cache;
606 int cpu;
607
608 /*
609 * XXX: It is safe to not lock the per-CPU caches, because we're
610 * tearing down the zone anyway. I.e., there will be no further use
611 * of the caches at this point.
612 *
613  * XXX: It would be good to be able to assert that the zone is being
614 * torn down to prevent improper use of cache_drain().
615 *
616 * XXX: We lock the zone before passing into bucket_cache_drain() as
617 * it is used elsewhere. Should the tear-down path be made special
618 * there in some form?
619 */
620 CPU_FOREACH(cpu) {
621 cache = &zone->uz_cpu[cpu];
622 bucket_drain(zone, cache->uc_allocbucket);
623 bucket_drain(zone, cache->uc_freebucket);
624 if (cache->uc_allocbucket != NULL)
625 bucket_free(cache->uc_allocbucket);
626 if (cache->uc_freebucket != NULL)
627 bucket_free(cache->uc_freebucket);
628 cache->uc_allocbucket = cache->uc_freebucket = NULL;
629 }
630 ZONE_LOCK(zone);
631 bucket_cache_drain(zone);
632 ZONE_UNLOCK(zone);
633 }
634
635 /*
636 * Drain the cached buckets from a zone. Expects a locked zone on entry.
637 */
638 static void
639 bucket_cache_drain(uma_zone_t zone)
640 {
641 uma_bucket_t bucket;
642
643 /*
644  * Drain the bucket queues and free the buckets; we keep just two per
645 * cpu (alloc/free).
646 */
647 while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
648 LIST_REMOVE(bucket, ub_link);
649 ZONE_UNLOCK(zone);
650 bucket_drain(zone, bucket);
651 bucket_free(bucket);
652 ZONE_LOCK(zone);
653 }
654
655 	/* Now we do the free queue. */
656 while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
657 LIST_REMOVE(bucket, ub_link);
658 bucket_free(bucket);
659 }
660 }
661
662 /*
663 * Frees pages from a keg back to the system. This is done on demand from
664 * the pageout daemon.
665 *
666 * Returns nothing.
667 */
668 static void
669 keg_drain(uma_keg_t keg)
670 {
671 struct slabhead freeslabs = { 0 };
672 uma_slab_t slab;
673 uma_slab_t n;
674 u_int8_t flags;
675 u_int8_t *mem;
676 int i;
677
678 /*
679 * We don't want to take pages from statically allocated kegs at this
680 * time
681 */
682 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
683 return;
684
685 #ifdef UMA_DEBUG
686 printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
687 #endif
688 KEG_LOCK(keg);
689 if (keg->uk_free == 0)
690 goto finished;
691
692 slab = LIST_FIRST(&keg->uk_free_slab);
693 while (slab) {
694 n = LIST_NEXT(slab, us_link);
695
696 		/* We have nowhere to free these to. */
697 if (slab->us_flags & UMA_SLAB_BOOT) {
698 slab = n;
699 continue;
700 }
701
702 LIST_REMOVE(slab, us_link);
703 keg->uk_pages -= keg->uk_ppera;
704 keg->uk_free -= keg->uk_ipers;
705
706 if (keg->uk_flags & UMA_ZONE_HASH)
707 UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
708
709 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
710
711 slab = n;
712 }
713 finished:
714 KEG_UNLOCK(keg);
715
716 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
717 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
718 if (keg->uk_fini)
719 for (i = 0; i < keg->uk_ipers; i++)
720 keg->uk_fini(
721 slab->us_data + (keg->uk_rsize * i),
722 keg->uk_size);
723 flags = slab->us_flags;
724 mem = slab->us_data;
725
726 if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
727 vm_object_t obj;
728
729 if (flags & UMA_SLAB_KMEM)
730 obj = kmem_object;
731 else if (flags & UMA_SLAB_KERNEL)
732 obj = kernel_object;
733 else
734 obj = NULL;
735 for (i = 0; i < keg->uk_ppera; i++)
736 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
737 obj);
738 }
739 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
740 zone_free_item(keg->uk_slabzone, slab, NULL,
741 SKIP_NONE, ZFREE_STATFREE);
742 #ifdef UMA_DEBUG
743 printf("%s: Returning %d bytes.\n",
744 keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera);
745 #endif
746 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
747 }
748 }
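/*
 * Note the two-phase shape of keg_drain() above: slabs are unlinked and
 * collected on the private freeslabs list while the keg lock is held,
 * and only after the lock is dropped are the fini callbacks and
 * uk_freef run, since those may re-enter the VM and must not be called
 * with the keg lock held.
 */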
749
750 static void
751 zone_drain_wait(uma_zone_t zone, int waitok)
752 {
753
754 /*
755 * Set draining to interlock with zone_dtor() so we can release our
756 * locks as we go. Only dtor() should do a WAITOK call since it
757 * is the only call that knows the structure will still be available
758 * when it wakes up.
759 */
760 ZONE_LOCK(zone);
761 while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
762 if (waitok == M_NOWAIT)
763 goto out;
764 mtx_unlock(&uma_mtx);
765 msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
766 mtx_lock(&uma_mtx);
767 }
768 zone->uz_flags |= UMA_ZFLAG_DRAINING;
769 bucket_cache_drain(zone);
770 ZONE_UNLOCK(zone);
771 /*
772 * The DRAINING flag protects us from being freed while
773 * we're running. Normally the uma_mtx would protect us but we
774 * must be able to release and acquire the right lock for each keg.
775 */
776 zone_foreach_keg(zone, &keg_drain);
777 ZONE_LOCK(zone);
778 zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
779 wakeup(zone);
780 out:
781 ZONE_UNLOCK(zone);
782 }
783
784 void
785 zone_drain(uma_zone_t zone)
786 {
787
788 zone_drain_wait(zone, M_NOWAIT);
789 }
790
791 /*
792 * Allocate a new slab for a keg. This does not insert the slab onto a list.
793 *
794 * Arguments:
795 * wait Shall we wait?
796 *
797 * Returns:
798 * The slab that was allocated or NULL if there is no memory and the
799 * caller specified M_NOWAIT.
800 */
801 static uma_slab_t
802 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
803 {
804 uma_slabrefcnt_t slabref;
805 uma_alloc allocf;
806 uma_slab_t slab;
807 u_int8_t *mem;
808 u_int8_t flags;
809 int i;
810
811 mtx_assert(&keg->uk_lock, MA_OWNED);
812 slab = NULL;
813
814 #ifdef UMA_DEBUG
815 printf("slab_zalloc: Allocating a new slab for %s\n", keg->uk_name);
816 #endif
817 allocf = keg->uk_allocf;
818 KEG_UNLOCK(keg);
819
820 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
821 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
822 if (slab == NULL) {
823 KEG_LOCK(keg);
824 			return (NULL);
825 }
826 }
827
828 /*
829 * This reproduces the old vm_zone behavior of zero filling pages the
830 * first time they are added to a zone.
831 *
832 * Malloced items are zeroed in uma_zalloc.
833 */
834
835 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
836 wait |= M_ZERO;
837 else
838 wait &= ~M_ZERO;
839
840 if (keg->uk_flags & UMA_ZONE_NODUMP)
841 wait |= M_NODUMP;
842
843 /* zone is passed for legacy reasons. */
844 mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
845 if (mem == NULL) {
846 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
847 zone_free_item(keg->uk_slabzone, slab, NULL,
848 SKIP_NONE, ZFREE_STATFREE);
849 KEG_LOCK(keg);
850 return (NULL);
851 }
852
853 /* Point the slab into the allocated memory */
854 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
855 slab = (uma_slab_t )(mem + keg->uk_pgoff);
856
857 if (keg->uk_flags & UMA_ZONE_VTOSLAB)
858 for (i = 0; i < keg->uk_ppera; i++)
859 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
860
861 slab->us_keg = keg;
862 slab->us_data = mem;
863 slab->us_freecount = keg->uk_ipers;
864 slab->us_firstfree = 0;
865 slab->us_flags = flags;
866
867 if (keg->uk_flags & UMA_ZONE_REFCNT) {
868 slabref = (uma_slabrefcnt_t)slab;
869 for (i = 0; i < keg->uk_ipers; i++) {
870 slabref->us_freelist[i].us_refcnt = 0;
871 slabref->us_freelist[i].us_item = i+1;
872 }
873 } else {
874 for (i = 0; i < keg->uk_ipers; i++)
875 slab->us_freelist[i].us_item = i+1;
876 }
877
878 if (keg->uk_init != NULL) {
879 for (i = 0; i < keg->uk_ipers; i++)
880 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
881 keg->uk_size, wait) != 0)
882 break;
883 if (i != keg->uk_ipers) {
884 if (keg->uk_fini != NULL) {
885 for (i--; i > -1; i--)
886 keg->uk_fini(slab->us_data +
887 (keg->uk_rsize * i),
888 keg->uk_size);
889 }
890 if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
891 vm_object_t obj;
892
893 if (flags & UMA_SLAB_KMEM)
894 obj = kmem_object;
895 else if (flags & UMA_SLAB_KERNEL)
896 obj = kernel_object;
897 else
898 obj = NULL;
899 for (i = 0; i < keg->uk_ppera; i++)
900 vsetobj((vm_offset_t)mem +
901 (i * PAGE_SIZE), obj);
902 }
903 if (keg->uk_flags & UMA_ZONE_OFFPAGE)
904 zone_free_item(keg->uk_slabzone, slab,
905 NULL, SKIP_NONE, ZFREE_STATFREE);
906 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
907 flags);
908 KEG_LOCK(keg);
909 return (NULL);
910 }
911 }
912 KEG_LOCK(keg);
913
914 if (keg->uk_flags & UMA_ZONE_HASH)
915 UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
916
917 keg->uk_pages += keg->uk_ppera;
918 keg->uk_free += keg->uk_ipers;
919
920 return (slab);
921 }
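/*
 * The us_freelist initialization above encodes the slab's free list as
 * an index-linked chain: us_firstfree names the first free item and
 * each entry's us_item names the next.  A sketch of how an allocation
 * pops that chain (compare slab_alloc_item() later in this file):
 */
#if 0
	u_int8_t freei;

	freei = slab->us_firstfree;
	slab->us_firstfree = slab->us_freelist[freei].us_item;
	item = slab->us_data + (keg->uk_rsize * freei);
#endif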
922
923 /*
924 * This function is intended to be used early on in place of page_alloc() so
925 * that we may use the boot time page cache to satisfy allocations before
926 * the VM is ready.
927 */
928 static void *
929 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
930 {
931 uma_keg_t keg;
932 uma_slab_t tmps;
933 int pages, check_pages;
934
935 keg = zone_first_keg(zone);
936 pages = howmany(bytes, PAGE_SIZE);
937 check_pages = pages - 1;
938 	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages"));
939
940 /*
941 * Check our small startup cache to see if it has pages remaining.
942 */
943 mtx_lock(&uma_boot_pages_mtx);
944
945 /* First check if we have enough room. */
946 tmps = LIST_FIRST(&uma_boot_pages);
947 while (tmps != NULL && check_pages-- > 0)
948 tmps = LIST_NEXT(tmps, us_link);
949 if (tmps != NULL) {
950 /*
951 * It's ok to lose tmps references. The last one will
952 * have tmps->us_data pointing to the start address of
953 * "pages" contiguous pages of memory.
954 */
955 while (pages-- > 0) {
956 tmps = LIST_FIRST(&uma_boot_pages);
957 LIST_REMOVE(tmps, us_link);
958 }
959 mtx_unlock(&uma_boot_pages_mtx);
960 *pflag = tmps->us_flags;
961 return (tmps->us_data);
962 }
963 mtx_unlock(&uma_boot_pages_mtx);
964 if (booted < UMA_STARTUP2)
965 panic("UMA: Increase vm.boot_pages");
966 /*
967 	 * Now that we've booted, reset these users to their real allocator.
968 */
969 #ifdef UMA_MD_SMALL_ALLOC
970 keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
971 #else
972 keg->uk_allocf = page_alloc;
973 #endif
974 	return (keg->uk_allocf(zone, bytes, pflag, wait));
975 }
976
977 /*
978 * Allocates a number of pages from the system
979 *
980 * Arguments:
981 * bytes The number of bytes requested
982 * wait Shall we wait?
983 *
984 * Returns:
985  *	A pointer to the allocated memory or possibly
986 * NULL if M_NOWAIT is set.
987 */
988 static void *
989 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
990 {
991 void *p; /* Returned page */
992
993 *pflag = UMA_SLAB_KMEM;
994 p = (void *) kmem_malloc(kmem_map, bytes, wait);
995
996 return (p);
997 }
998
999 /*
1000 * Allocates a number of pages from within an object
1001 *
1002 * Arguments:
1003 * bytes The number of bytes requested
1004 * wait Shall we wait?
1005 *
1006 * Returns:
1007  *	A pointer to the allocated memory or possibly
1008 * NULL if M_NOWAIT is set.
1009 */
1010 static void *
1011 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1012 {
1013 vm_object_t object;
1014 vm_offset_t retkva, zkva;
1015 vm_page_t p;
1016 int pages, startpages;
1017 uma_keg_t keg;
1018
1019 keg = zone_first_keg(zone);
1020 object = keg->uk_obj;
1021 retkva = 0;
1022
1023 /*
1024 * This looks a little weird since we're getting one page at a time.
1025 */
1026 VM_OBJECT_LOCK(object);
1027 p = TAILQ_LAST(&object->memq, pglist);
1028 pages = p != NULL ? p->pindex + 1 : 0;
1029 startpages = pages;
1030 zkva = keg->uk_kva + pages * PAGE_SIZE;
1031 for (; bytes > 0; bytes -= PAGE_SIZE) {
1032 p = vm_page_alloc(object, pages,
1033 VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
1034 if (p == NULL) {
1035 if (pages != startpages)
1036 pmap_qremove(retkva, pages - startpages);
1037 while (pages != startpages) {
1038 pages--;
1039 p = TAILQ_LAST(&object->memq, pglist);
1040 vm_page_unwire(p, 0);
1041 vm_page_free(p);
1042 }
1043 retkva = 0;
1044 goto done;
1045 }
1046 pmap_qenter(zkva, &p, 1);
1047 if (retkva == 0)
1048 retkva = zkva;
1049 zkva += PAGE_SIZE;
1050 pages += 1;
1051 }
1052 done:
1053 VM_OBJECT_UNLOCK(object);
1054 *flags = UMA_SLAB_PRIV;
1055
1056 return ((void *)retkva);
1057 }
1058
1059 /*
1060 * Frees a number of pages to the system
1061 *
1062 * Arguments:
1063 * mem A pointer to the memory to be freed
1064 * size The size of the memory being freed
1065 * flags The original p->us_flags field
1066 *
1067 * Returns:
1068 * Nothing
1069 */
1070 static void
1071 page_free(void *mem, int size, u_int8_t flags)
1072 {
1073 vm_map_t map;
1074
1075 if (flags & UMA_SLAB_KMEM)
1076 map = kmem_map;
1077 else if (flags & UMA_SLAB_KERNEL)
1078 map = kernel_map;
1079 else
1080 panic("UMA: page_free used with invalid flags %d", flags);
1081
1082 kmem_free(map, (vm_offset_t)mem, size);
1083 }
1084
1085 /*
1086 * Zero fill initializer
1087 *
1088 * Arguments/Returns follow uma_init specifications
1089 */
1090 static int
1091 zero_init(void *mem, int size, int flags)
1092 {
1093 bzero(mem, size);
1094 return (0);
1095 }
1096
1097 /*
1098  * Finish creating a small uma keg. This calculates ipers and the keg size.
1099  *
1100  * Arguments
1101  *	keg  The keg we should initialize
1102 *
1103 * Returns
1104 * Nothing
1105 */
1106 static void
1107 keg_small_init(uma_keg_t keg)
1108 {
1109 u_int rsize;
1110 u_int memused;
1111 u_int wastedspace;
1112 u_int shsize;
1113
1114 KASSERT(keg != NULL, ("Keg is null in keg_small_init"));
1115 rsize = keg->uk_size;
1116
1117 if (rsize < UMA_SMALLEST_UNIT)
1118 rsize = UMA_SMALLEST_UNIT;
1119 if (rsize & keg->uk_align)
1120 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1121
1122 keg->uk_rsize = rsize;
1123 keg->uk_ppera = 1;
1124
1125 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1126 rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */
1127 shsize = sizeof(struct uma_slab_refcnt);
1128 } else {
1129 rsize += UMA_FRITM_SZ; /* Account for linkage */
1130 shsize = sizeof(struct uma_slab);
1131 }
1132
1133 keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
1134 KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0"));
1135 memused = keg->uk_ipers * rsize + shsize;
1136 wastedspace = UMA_SLAB_SIZE - memused;
1137
1138 /*
1139 * We can't do OFFPAGE if we're internal or if we've been
1140 * asked to not go to the VM for buckets. If we do this we
1141 * may end up going to the VM (kmem_map) for slabs which we
1142 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
1143 * result of UMA_ZONE_VM, which clearly forbids it.
1144 */
1145 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1146 (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1147 return;
1148
1149 if ((wastedspace >= UMA_MAX_WASTE) &&
1150 (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
1151 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
1152 KASSERT(keg->uk_ipers <= 255,
1153 ("keg_small_init: keg->uk_ipers too high!"));
1154 #ifdef UMA_DEBUG
1155 printf("UMA decided we need offpage slab headers for "
1156 "keg: %s, calculated wastedspace = %d, "
1157 "maximum wasted space allowed = %d, "
1158 "calculated ipers = %d, "
1159 "new wasted space = %d\n", keg->uk_name, wastedspace,
1160 UMA_MAX_WASTE, keg->uk_ipers,
1161 UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
1162 #endif
1163 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1164 if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1165 keg->uk_flags |= UMA_ZONE_HASH;
1166 }
1167 }
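/*
 * A worked example of the sizing above, under illustrative assumptions
 * (UMA_SLAB_SIZE == 4096, 8-byte alignment, UMA_FRITM_SZ == 4, no
 * UMA_ZONE_REFCNT; the real constants come from uma_int.h): for
 * uk_size = 200, rsize = 200 + 4 = 204 and
 * ipers = (4096 - sizeof(struct uma_slab)) / 204, with wastedspace
 * being whatever is left of the page.  Only when wastedspace reaches
 * UMA_MAX_WASTE does the keg switch to OFFPAGE slab headers.
 */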
1168
1169 /*
1170  * Finish creating a large (> UMA_SLAB_SIZE) uma keg. Just give in and do
1171 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
1172 * more complicated.
1173 *
1174 * Arguments
1175 * keg The keg we should initialize
1176 *
1177 * Returns
1178 * Nothing
1179 */
1180 static void
1181 keg_large_init(uma_keg_t keg)
1182 {
1183 int pages;
1184
1185 KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1186 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1187 ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1188
1189 pages = keg->uk_size / UMA_SLAB_SIZE;
1190
1191 /* Account for remainder */
1192 if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
1193 pages++;
1194
1195 keg->uk_ppera = pages;
1196 keg->uk_ipers = 1;
1197 keg->uk_rsize = keg->uk_size;
1198
1199 /* We can't do OFFPAGE if we're internal, bail out here. */
1200 if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1201 return;
1202
1203 keg->uk_flags |= UMA_ZONE_OFFPAGE;
1204 if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1205 keg->uk_flags |= UMA_ZONE_HASH;
1206 }
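/*
 * For example (assuming 4 KB slabs): a keg with uk_size = 9000 gets
 * pages = 2 + 1 = 3, so uk_ppera = 3, uk_ipers = 1 and uk_rsize = 9000,
 * with the slab header always kept OFFPAGE.
 */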
1207
1208 static void
1209 keg_cachespread_init(uma_keg_t keg)
1210 {
1211 int alignsize;
1212 int trailer;
1213 int pages;
1214 int rsize;
1215
1216 alignsize = keg->uk_align + 1;
1217 rsize = keg->uk_size;
1218 /*
1219 * We want one item to start on every align boundary in a page. To
1220 * do this we will span pages. We will also extend the item by the
1221 * size of align if it is an even multiple of align. Otherwise, it
1222 * would fall on the same boundary every time.
1223 */
1224 if (rsize & keg->uk_align)
1225 rsize = (rsize & ~keg->uk_align) + alignsize;
1226 if ((rsize & alignsize) == 0)
1227 rsize += alignsize;
1228 trailer = rsize - keg->uk_size;
1229 pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1230 pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1231 keg->uk_rsize = rsize;
1232 keg->uk_ppera = pages;
1233 keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1234 keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1235 KASSERT(keg->uk_ipers <= uma_max_ipers,
1236 ("keg_small_init: keg->uk_ipers too high(%d) increase max_ipers",
1237 keg->uk_ipers));
1238 }
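/*
 * A worked example (assuming 4 KB pages and a 64-byte cache-line
 * alignment, both illustrative): a 128-byte item gets
 * rsize = 128 + 64 = 192, trailer = 64, pages = (192 * 64) / 4096 = 3
 * and ipers = (3 * 4096 + 64) / 192 = 64, so item starts rotate
 * through every 64-byte boundary across the three-page slab.
 */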
1239
1240 /*
1241 * Keg header ctor. This initializes all fields, locks, etc. And inserts
1242 * the keg onto the global keg list.
1243 *
1244 * Arguments/Returns follow uma_ctor specifications
1245 * udata Actually uma_kctor_args
1246 */
1247 static int
1248 keg_ctor(void *mem, int size, void *udata, int flags)
1249 {
1250 struct uma_kctor_args *arg = udata;
1251 uma_keg_t keg = mem;
1252 uma_zone_t zone;
1253
1254 bzero(keg, size);
1255 keg->uk_size = arg->size;
1256 keg->uk_init = arg->uminit;
1257 keg->uk_fini = arg->fini;
1258 keg->uk_align = arg->align;
1259 keg->uk_free = 0;
1260 keg->uk_pages = 0;
1261 keg->uk_flags = arg->flags;
1262 keg->uk_allocf = page_alloc;
1263 keg->uk_freef = page_free;
1264 keg->uk_recurse = 0;
1265 keg->uk_slabzone = NULL;
1266
1267 /*
1268 * The master zone is passed to us at keg-creation time.
1269 */
1270 zone = arg->zone;
1271 keg->uk_name = zone->uz_name;
1272
1273 if (arg->flags & UMA_ZONE_VM)
1274 keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1275
1276 if (arg->flags & UMA_ZONE_ZINIT)
1277 keg->uk_init = zero_init;
1278
1279 if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
1280 keg->uk_flags |= UMA_ZONE_VTOSLAB;
1281
1282 /*
1283 * The +UMA_FRITM_SZ added to uk_size is to account for the
1284 * linkage that is added to the size in keg_small_init(). If
1285 * we don't account for this here then we may end up in
1286 * keg_small_init() with a calculated 'ipers' of 0.
1287 */
1288 if (keg->uk_flags & UMA_ZONE_REFCNT) {
1289 if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
1290 keg_cachespread_init(keg);
1291 else if ((keg->uk_size+UMA_FRITMREF_SZ) >
1292 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
1293 keg_large_init(keg);
1294 else
1295 keg_small_init(keg);
1296 } else {
1297 if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
1298 keg_cachespread_init(keg);
1299 else if ((keg->uk_size+UMA_FRITM_SZ) >
1300 (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1301 keg_large_init(keg);
1302 else
1303 keg_small_init(keg);
1304 }
1305
1306 if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1307 if (keg->uk_flags & UMA_ZONE_REFCNT)
1308 keg->uk_slabzone = slabrefzone;
1309 else
1310 keg->uk_slabzone = slabzone;
1311 }
1312
1313 /*
1314 * If we haven't booted yet we need allocations to go through the
1315 * startup cache until the vm is ready.
1316 */
1317 if (keg->uk_ppera == 1) {
1318 #ifdef UMA_MD_SMALL_ALLOC
1319 keg->uk_allocf = uma_small_alloc;
1320 keg->uk_freef = uma_small_free;
1321
1322 if (booted < UMA_STARTUP)
1323 keg->uk_allocf = startup_alloc;
1324 #else
1325 if (booted < UMA_STARTUP2)
1326 keg->uk_allocf = startup_alloc;
1327 #endif
1328 } else if (booted < UMA_STARTUP2 &&
1329 (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1330 keg->uk_allocf = startup_alloc;
1331
1332 /*
1333 * Initialize keg's lock (shared among zones).
1334 */
1335 if (arg->flags & UMA_ZONE_MTXCLASS)
1336 KEG_LOCK_INIT(keg, 1);
1337 else
1338 KEG_LOCK_INIT(keg, 0);
1339
1340 /*
1341 * If we're putting the slab header in the actual page we need to
1342 * figure out where in each page it goes. This calculates a right
1343 * justified offset into the memory on an ALIGN_PTR boundary.
1344 */
1345 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1346 u_int totsize;
1347
1348 /* Size of the slab struct and free list */
1349 if (keg->uk_flags & UMA_ZONE_REFCNT)
1350 totsize = sizeof(struct uma_slab_refcnt) +
1351 keg->uk_ipers * UMA_FRITMREF_SZ;
1352 else
1353 totsize = sizeof(struct uma_slab) +
1354 keg->uk_ipers * UMA_FRITM_SZ;
1355
1356 if (totsize & UMA_ALIGN_PTR)
1357 totsize = (totsize & ~UMA_ALIGN_PTR) +
1358 (UMA_ALIGN_PTR + 1);
1359 keg->uk_pgoff = (UMA_SLAB_SIZE * keg->uk_ppera) - totsize;
1360
1361 if (keg->uk_flags & UMA_ZONE_REFCNT)
1362 totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
1363 + keg->uk_ipers * UMA_FRITMREF_SZ;
1364 else
1365 totsize = keg->uk_pgoff + sizeof(struct uma_slab)
1366 + keg->uk_ipers * UMA_FRITM_SZ;
1367
1368 /*
1369 		 * The only way the following is possible is if our
1370 		 * UMA_ALIGN_PTR adjustments have made us bigger than
1371 		 * UMA_SLAB_SIZE. I haven't checked whether this is
1372 * mathematically possible for all cases, so we make
1373 * sure here anyway.
1374 */
1375 if (totsize > UMA_SLAB_SIZE * keg->uk_ppera) {
1376 printf("zone %s ipers %d rsize %d size %d\n",
1377 zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1378 keg->uk_size);
1379 panic("UMA slab won't fit.");
1380 }
1381 }
1382
1383 if (keg->uk_flags & UMA_ZONE_HASH)
1384 hash_alloc(&keg->uk_hash);
1385
1386 #ifdef UMA_DEBUG
1387 printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
1388 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1389 keg->uk_ipers, keg->uk_ppera,
1390 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1391 #endif
1392
1393 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1394
1395 mtx_lock(&uma_mtx);
1396 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1397 mtx_unlock(&uma_mtx);
1398 return (0);
1399 }
1400
1401 /*
1402 * Zone header ctor. This initializes all fields, locks, etc.
1403 *
1404 * Arguments/Returns follow uma_ctor specifications
1405 * udata Actually uma_zctor_args
1406 */
1407 static int
1408 zone_ctor(void *mem, int size, void *udata, int flags)
1409 {
1410 struct uma_zctor_args *arg = udata;
1411 uma_zone_t zone = mem;
1412 uma_zone_t z;
1413 uma_keg_t keg;
1414
1415 bzero(zone, size);
1416 zone->uz_name = arg->name;
1417 zone->uz_ctor = arg->ctor;
1418 zone->uz_dtor = arg->dtor;
1419 zone->uz_slab = zone_fetch_slab;
1420 zone->uz_init = NULL;
1421 zone->uz_fini = NULL;
1422 zone->uz_allocs = 0;
1423 zone->uz_frees = 0;
1424 zone->uz_fails = 0;
1425 zone->uz_sleeps = 0;
1426 zone->uz_fills = zone->uz_count = 0;
1427 zone->uz_flags = 0;
1428 keg = arg->keg;
1429
1430 if (arg->flags & UMA_ZONE_SECONDARY) {
1431 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1432 zone->uz_init = arg->uminit;
1433 zone->uz_fini = arg->fini;
1434 zone->uz_lock = &keg->uk_lock;
1435 zone->uz_flags |= UMA_ZONE_SECONDARY;
1436 mtx_lock(&uma_mtx);
1437 ZONE_LOCK(zone);
1438 LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1439 if (LIST_NEXT(z, uz_link) == NULL) {
1440 LIST_INSERT_AFTER(z, zone, uz_link);
1441 break;
1442 }
1443 }
1444 ZONE_UNLOCK(zone);
1445 mtx_unlock(&uma_mtx);
1446 } else if (keg == NULL) {
1447 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1448 arg->align, arg->flags)) == NULL)
1449 return (ENOMEM);
1450 } else {
1451 struct uma_kctor_args karg;
1452 int error;
1453
1454 /* We should only be here from uma_startup() */
1455 karg.size = arg->size;
1456 karg.uminit = arg->uminit;
1457 karg.fini = arg->fini;
1458 karg.align = arg->align;
1459 karg.flags = arg->flags;
1460 karg.zone = zone;
1461 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1462 flags);
1463 if (error)
1464 return (error);
1465 }
1466 /*
1467 * Link in the first keg.
1468 */
1469 zone->uz_klink.kl_keg = keg;
1470 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1471 zone->uz_lock = &keg->uk_lock;
1472 zone->uz_size = keg->uk_size;
1473 zone->uz_flags |= (keg->uk_flags &
1474 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1475
1476 /*
1477 * Some internal zones don't have room allocated for the per cpu
1478 * caches. If we're internal, bail out here.
1479 */
1480 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1481 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1482 ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1483 return (0);
1484 }
1485
1486 if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
1487 zone->uz_count = BUCKET_MAX;
1488 else if (keg->uk_ipers <= BUCKET_MAX)
1489 zone->uz_count = keg->uk_ipers;
1490 else
1491 zone->uz_count = BUCKET_MAX;
1492 return (0);
1493 }
1494
1495 /*
1496 * Keg header dtor. This frees all data, destroys locks, frees the hash
1497 * table and removes the keg from the global list.
1498 *
1499 * Arguments/Returns follow uma_dtor specifications
1500 * udata unused
1501 */
1502 static void
1503 keg_dtor(void *arg, int size, void *udata)
1504 {
1505 uma_keg_t keg;
1506
1507 keg = (uma_keg_t)arg;
1508 KEG_LOCK(keg);
1509 if (keg->uk_free != 0) {
1510 printf("Freed UMA keg was not empty (%d items). "
1511 " Lost %d pages of memory.\n",
1512 keg->uk_free, keg->uk_pages);
1513 }
1514 KEG_UNLOCK(keg);
1515
1516 hash_free(&keg->uk_hash);
1517
1518 KEG_LOCK_FINI(keg);
1519 }
1520
1521 /*
1522 * Zone header dtor.
1523 *
1524 * Arguments/Returns follow uma_dtor specifications
1525 * udata unused
1526 */
1527 static void
1528 zone_dtor(void *arg, int size, void *udata)
1529 {
1530 uma_klink_t klink;
1531 uma_zone_t zone;
1532 uma_keg_t keg;
1533
1534 zone = (uma_zone_t)arg;
1535 keg = zone_first_keg(zone);
1536
1537 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1538 cache_drain(zone);
1539
1540 mtx_lock(&uma_mtx);
1541 LIST_REMOVE(zone, uz_link);
1542 mtx_unlock(&uma_mtx);
1543 /*
1544 * XXX there are some races here where
1545 * the zone can be drained but zone lock
1546 * released and then refilled before we
1547 	 * remove it... we don't care for now.
1548 */
1549 zone_drain_wait(zone, M_WAITOK);
1550 /*
1551 * Unlink all of our kegs.
1552 */
1553 while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1554 klink->kl_keg = NULL;
1555 LIST_REMOVE(klink, kl_link);
1556 if (klink == &zone->uz_klink)
1557 continue;
1558 free(klink, M_TEMP);
1559 }
1560 /*
1561 * We only destroy kegs from non secondary zones.
1562 */
1563 if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1564 mtx_lock(&uma_mtx);
1565 LIST_REMOVE(keg, uk_link);
1566 mtx_unlock(&uma_mtx);
1567 zone_free_item(kegs, keg, NULL, SKIP_NONE,
1568 ZFREE_STATFREE);
1569 }
1570 }
1571
1572 /*
1573 * Traverses every zone in the system and calls a callback
1574 *
1575 * Arguments:
1576 * zfunc A pointer to a function which accepts a zone
1577 * as an argument.
1578 *
1579 * Returns:
1580 * Nothing
1581 */
1582 static void
1583 zone_foreach(void (*zfunc)(uma_zone_t))
1584 {
1585 uma_keg_t keg;
1586 uma_zone_t zone;
1587
1588 mtx_lock(&uma_mtx);
1589 LIST_FOREACH(keg, &uma_kegs, uk_link) {
1590 LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1591 zfunc(zone);
1592 }
1593 mtx_unlock(&uma_mtx);
1594 }
1595
1596 /* Public functions */
1597 /* See uma.h */
1598 void
1599 uma_startup(void *bootmem, int boot_pages)
1600 {
1601 struct uma_zctor_args args;
1602 uma_slab_t slab;
1603 u_int slabsize;
1604 u_int objsize, totsize, wsize;
1605 int i;
1606
1607 #ifdef UMA_DEBUG
1608 printf("Creating uma keg headers zone and keg.\n");
1609 #endif
1610 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1611
1612 /*
1613 * Figure out the maximum number of items-per-slab we'll have if
1614 * we're using the OFFPAGE slab header to track free items, given
1615 * all possible object sizes and the maximum desired wastage
1616 * (UMA_MAX_WASTE).
1617 *
1618 * We iterate until we find an object size for
1619 * which the calculated wastage in keg_small_init() will be
1620 * enough to warrant OFFPAGE. Since wastedspace versus objsize
1621 * is an overall increasing see-saw function, we find the smallest
1622 * objsize such that the wastage is always acceptable for objects
1623 * with that objsize or smaller. Since a smaller objsize always
1624 * generates a larger possible uma_max_ipers, we use this computed
1625 * objsize to calculate the largest ipers possible. Since the
1626 * ipers calculated for OFFPAGE slab headers is always larger than
1627 * the ipers initially calculated in keg_small_init(), we use
1628 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
1629 * obtain the maximum ipers possible for offpage slab headers.
1630 *
1631 	 * It should be noted that ipers versus objsize is an inversely
1632 * proportional function which drops off rather quickly so as
1633 * long as our UMA_MAX_WASTE is such that the objsize we calculate
1634 * falls into the portion of the inverse relation AFTER the steep
1635 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
1636 *
1637 * Note that we have 8-bits (1 byte) to use as a freelist index
1638 * inside the actual slab header itself and this is enough to
1639 	 * accommodate us. In the worst case, a UMA_SMALLEST_UNIT sized
1640 * object with offpage slab header would have ipers =
1641 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
1642 * 1 greater than what our byte-integer freelist index can
1643 	 * accommodate, but we know that this situation never occurs as
1644 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
1645 * that we need to go to offpage slab headers. Or, if we do,
1646 * then we trap that condition below and panic in the INVARIANTS case.
1647 */
1648 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
1649 totsize = wsize;
1650 objsize = UMA_SMALLEST_UNIT;
1651 while (totsize >= wsize) {
1652 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
1653 (objsize + UMA_FRITM_SZ);
1654 totsize *= (UMA_FRITM_SZ + objsize);
1655 objsize++;
1656 }
1657 if (objsize > UMA_SMALLEST_UNIT)
1658 objsize--;
1659 uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64);
1660
1661 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
1662 totsize = wsize;
1663 objsize = UMA_SMALLEST_UNIT;
1664 while (totsize >= wsize) {
1665 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
1666 (objsize + UMA_FRITMREF_SZ);
1667 totsize *= (UMA_FRITMREF_SZ + objsize);
1668 objsize++;
1669 }
1670 if (objsize > UMA_SMALLEST_UNIT)
1671 objsize--;
1672 uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64);
1673
1674 KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
1675 ("uma_startup: calculated uma_max_ipers values too large!"));
1676
1677 #ifdef UMA_DEBUG
1678 printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
1679 printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
1680 uma_max_ipers_ref);
1681 #endif
1682
1683 /* "manually" create the initial zone */
1684 args.name = "UMA Kegs";
1685 args.size = sizeof(struct uma_keg);
1686 args.ctor = keg_ctor;
1687 args.dtor = keg_dtor;
1688 args.uminit = zero_init;
1689 args.fini = NULL;
1690 args.keg = &masterkeg;
1691 args.align = 32 - 1;
1692 args.flags = UMA_ZFLAG_INTERNAL;
1693 	/* The initial zone has no per-CPU queues so it's smaller */
1694 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1695
1696 #ifdef UMA_DEBUG
1697 printf("Filling boot free list.\n");
1698 #endif
1699 for (i = 0; i < boot_pages; i++) {
1700 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1701 slab->us_data = (u_int8_t *)slab;
1702 slab->us_flags = UMA_SLAB_BOOT;
1703 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1704 }
1705 mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1706
1707 #ifdef UMA_DEBUG
1708 printf("Creating uma zone headers zone and keg.\n");
1709 #endif
1710 args.name = "UMA Zones";
1711 args.size = sizeof(struct uma_zone) +
1712 (sizeof(struct uma_cache) * (mp_maxid + 1));
1713 args.ctor = zone_ctor;
1714 args.dtor = zone_dtor;
1715 args.uminit = zero_init;
1716 args.fini = NULL;
1717 args.keg = NULL;
1718 args.align = 32 - 1;
1719 args.flags = UMA_ZFLAG_INTERNAL;
1720 	/* The initial zone has no per-CPU queues so it's smaller */
1721 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1722
1723 #ifdef UMA_DEBUG
1724 printf("Initializing pcpu cache locks.\n");
1725 #endif
1726 #ifdef UMA_DEBUG
1727 printf("Creating slab and hash zones.\n");
1728 #endif
1729
1730 /*
1731 * This is the max number of free list items we'll have with
1732 * offpage slabs.
1733 */
1734 slabsize = uma_max_ipers * UMA_FRITM_SZ;
1735 slabsize += sizeof(struct uma_slab);
1736
1737 /* Now make a zone for slab headers */
1738 slabzone = uma_zcreate("UMA Slabs",
1739 slabsize,
1740 NULL, NULL, NULL, NULL,
1741 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1742
1743 /*
1744 * We also create a zone for the bigger slabs with reference
1745 	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1746 */
1747 slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
1748 slabsize += sizeof(struct uma_slab_refcnt);
1749 slabrefzone = uma_zcreate("UMA RCntSlabs",
1750 slabsize,
1751 NULL, NULL, NULL, NULL,
1752 UMA_ALIGN_PTR,
1753 UMA_ZFLAG_INTERNAL);
1754
1755 hashzone = uma_zcreate("UMA Hash",
1756 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1757 NULL, NULL, NULL, NULL,
1758 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1759
1760 bucket_init();
1761
1762 booted = UMA_STARTUP;
1763
1764 #ifdef UMA_DEBUG
1765 printf("UMA startup complete.\n");
1766 #endif
1767 }
1768
1769 /* see uma.h */
1770 void
1771 uma_startup2(void)
1772 {
1773 booted = UMA_STARTUP2;
1774 bucket_enable();
1775 #ifdef UMA_DEBUG
1776 printf("UMA startup2 complete.\n");
1777 #endif
1778 }
1779
1780 /*
1781  * Initialize our callout handle.
1783  */
1784
1785 static void
1786 uma_startup3(void)
1787 {
1788 #ifdef UMA_DEBUG
1789 printf("Starting callout.\n");
1790 #endif
1791 callout_init(&uma_callout, CALLOUT_MPSAFE);
1792 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1793 #ifdef UMA_DEBUG
1794 printf("UMA startup3 complete.\n");
1795 #endif
1796 }
1797
1798 static uma_keg_t
1799 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1800 int align, u_int32_t flags)
1801 {
1802 struct uma_kctor_args args;
1803
1804 args.size = size;
1805 args.uminit = uminit;
1806 args.fini = fini;
1807 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1808 args.flags = flags;
1809 args.zone = zone;
1810 return (zone_alloc_item(kegs, &args, M_WAITOK));
1811 }
1812
1813 /* See uma.h */
1814 void
1815 uma_set_align(int align)
1816 {
1817
1818 if (align != UMA_ALIGN_CACHE)
1819 uma_align_cache = align;
1820 }
1821
1822 /* See uma.h */
1823 uma_zone_t
1824 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1825 uma_init uminit, uma_fini fini, int align, u_int32_t flags)
1826
1827 {
1828 struct uma_zctor_args args;
1829
1830 /* This stuff is essential for the zone ctor */
1831 args.name = name;
1832 args.size = size;
1833 args.ctor = ctor;
1834 args.dtor = dtor;
1835 args.uminit = uminit;
1836 args.fini = fini;
1837 args.align = align;
1838 args.flags = flags;
1839 args.keg = NULL;
1840
1841 return (zone_alloc_item(zones, &args, M_WAITOK));
1842 }
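/*
 * Typical consumer usage of uma_zcreate() (a sketch; the "foo" names
 * are hypothetical):
 */
#if 0
	static uma_zone_t foo_zone;
	struct foo *p;

	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	p = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
	/* ... use p ... */
	uma_zfree(foo_zone, p);
	uma_zdestroy(foo_zone);
#endif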
1843
1844 /* See uma.h */
1845 uma_zone_t
1846 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1847 uma_init zinit, uma_fini zfini, uma_zone_t master)
1848 {
1849 struct uma_zctor_args args;
1850 uma_keg_t keg;
1851
1852 keg = zone_first_keg(master);
1853 args.name = name;
1854 args.size = keg->uk_size;
1855 args.ctor = ctor;
1856 args.dtor = dtor;
1857 args.uminit = zinit;
1858 args.fini = zfini;
1859 args.align = keg->uk_align;
1860 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1861 args.keg = keg;
1862
1863 /* XXX Attaches only one keg of potentially many. */
1864 return (zone_alloc_item(zones, &args, M_WAITOK));
1865 }
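/*
 * A secondary zone shares the master's keg (and thus its slabs) while
 * layering its own ctor/dtor on top, roughly as the mbuf code does
 * (an illustrative call, not the exact kern_mbuf.c source):
 */
#if 0
	zone_pack = uma_zsecond_create("packet", mb_ctor_pack, mb_dtor_pack,
	    NULL, NULL, zone_mbuf);
#endif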
1866
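/*
 * Lock the two zones in a canonical (pointer) order so that concurrent
 * callers locking the same pair cannot deadlock; MTX_DUPOK allows
 * taking a second lock of the same lock class.
 */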
1867 static void
1868 zone_lock_pair(uma_zone_t a, uma_zone_t b)
1869 {
1870 if (a < b) {
1871 ZONE_LOCK(a);
1872 mtx_lock_flags(b->uz_lock, MTX_DUPOK);
1873 } else {
1874 ZONE_LOCK(b);
1875 mtx_lock_flags(a->uz_lock, MTX_DUPOK);
1876 }
1877 }
1878
1879 static void
1880 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1881 {
1882
1883 ZONE_UNLOCK(a);
1884 ZONE_UNLOCK(b);
1885 }
1886
1887 int
1888 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
1889 {
1890 uma_klink_t klink;
1891 uma_klink_t kl;
1892 int error;
1893
1894 error = 0;
1895 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
1896
1897 zone_lock_pair(zone, master);
1898 /*
1899 * zone must use vtoslab() to resolve objects and must already be
1900 * a secondary.
1901 */
1902 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
1903 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
1904 error = EINVAL;
1905 goto out;
1906 }
1907 /*
1908 * The new master must also use vtoslab().
1909 */
1910 if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
1911 error = EINVAL;
1912 goto out;
1913 }
1914 /*
1915 * Both must either be refcnt, or not be refcnt.
1916 */
1917 if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
1918 (master->uz_flags & UMA_ZONE_REFCNT)) {
1919 error = EINVAL;
1920 goto out;
1921 }
1922 /*
1923 * The underlying object must be the same size. rsize
1924 * may be different.
1925 */
1926 if (master->uz_size != zone->uz_size) {
1927 error = E2BIG;
1928 goto out;
1929 }
1930 /*
1931 * Put it at the end of the list.
1932 */
1933 klink->kl_keg = zone_first_keg(master);
1934 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
1935 if (LIST_NEXT(kl, kl_link) == NULL) {
1936 LIST_INSERT_AFTER(kl, klink, kl_link);
1937 break;
1938 }
1939 }
1940 klink = NULL;
1941 zone->uz_flags |= UMA_ZFLAG_MULTI;
1942 zone->uz_slab = zone_fetch_slab_multi;
1943
1944 out:
1945 zone_unlock_pair(zone, master);
1946 if (klink != NULL)
1947 free(klink, M_TEMP);
1948
1949 return (error);
1950 }
1951
1952
1953 /* See uma.h */
1954 void
1955 uma_zdestroy(uma_zone_t zone)
1956 {
1957
1958 zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
1959 }
1960
1961 /* See uma.h */
1962 void *
1963 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1964 {
1965 void *item;
1966 uma_cache_t cache;
1967 uma_bucket_t bucket;
1968 int cpu;
1969
1970 /* This is the fast path allocation */
1971 #ifdef UMA_DEBUG_ALLOC_1
1972 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1973 #endif
1974 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
1975 zone->uz_name, flags);
1976
1977 if (flags & M_WAITOK) {
1978 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1979 "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
1980 }
1981
1982 /*
1983 * If possible, allocate from the per-CPU cache. There are two
1984 * requirements for safe access to the per-CPU cache: (1) the thread
1985 * accessing the cache must not be preempted or yield during access,
1986 * and (2) the thread must not migrate CPUs without switching which
1987 * cache it accesses. We rely on a critical section to prevent
1988 * preemption and migration. We release the critical section in
1989 * order to acquire the zone mutex if we are unable to allocate from
1990 * the current cache; when we re-acquire the critical section, we
1991 * must detect and handle migration if it has occurred.
1992 */
1993 zalloc_restart:
1994 critical_enter();
1995 cpu = curcpu;
1996 cache = &zone->uz_cpu[cpu];
1997
1998 zalloc_start:
1999 bucket = cache->uc_allocbucket;
2000
2001 if (bucket) {
2002 if (bucket->ub_cnt > 0) {
2003 bucket->ub_cnt--;
2004 item = bucket->ub_bucket[bucket->ub_cnt];
2005 #ifdef INVARIANTS
2006 bucket->ub_bucket[bucket->ub_cnt] = NULL;
2007 #endif
2008 KASSERT(item != NULL,
2009 ("uma_zalloc: Bucket pointer mangled."));
2010 cache->uc_allocs++;
2011 critical_exit();
2012 #ifdef INVARIANTS
2013 ZONE_LOCK(zone);
2014 uma_dbg_alloc(zone, NULL, item);
2015 ZONE_UNLOCK(zone);
2016 #endif
2017 if (zone->uz_ctor != NULL) {
2018 if (zone->uz_ctor(item, zone->uz_size,
2019 udata, flags) != 0) {
2020 zone_free_item(zone, item, udata,
2021 SKIP_DTOR, ZFREE_STATFAIL |
2022 ZFREE_STATFREE);
2023 return (NULL);
2024 }
2025 }
2026 if (flags & M_ZERO)
2027 bzero(item, zone->uz_size);
2028 return (item);
2029 } else if (cache->uc_freebucket) {
2030 /*
2031 * We have run out of items in our allocbucket.
2032 * See if we can switch with our free bucket.
2033 */
2034 if (cache->uc_freebucket->ub_cnt > 0) {
2035 #ifdef UMA_DEBUG_ALLOC
2036 printf("uma_zalloc: Swapping empty with"
2037 " alloc.\n");
2038 #endif
2039 bucket = cache->uc_freebucket;
2040 cache->uc_freebucket = cache->uc_allocbucket;
2041 cache->uc_allocbucket = bucket;
2042
2043 goto zalloc_start;
2044 }
2045 }
2046 }
2047 /*
2048 	 * The attempt to retrieve the item from the per-CPU cache has failed, so
2049 * we must go back to the zone. This requires the zone lock, so we
2050 * must drop the critical section, then re-acquire it when we go back
2051 * to the cache. Since the critical section is released, we may be
2052 * preempted or migrate. As such, make sure not to maintain any
2053 * thread-local state specific to the cache from prior to releasing
2054 * the critical section.
2055 */
2056 critical_exit();
2057 ZONE_LOCK(zone);
2058 critical_enter();
2059 cpu = curcpu;
2060 cache = &zone->uz_cpu[cpu];
2061 bucket = cache->uc_allocbucket;
2062 if (bucket != NULL) {
2063 if (bucket->ub_cnt > 0) {
2064 ZONE_UNLOCK(zone);
2065 goto zalloc_start;
2066 }
2067 bucket = cache->uc_freebucket;
2068 if (bucket != NULL && bucket->ub_cnt > 0) {
2069 ZONE_UNLOCK(zone);
2070 goto zalloc_start;
2071 }
2072 }
2073
2074 /* Since we have locked the zone we may as well send back our stats */
2075 zone->uz_allocs += cache->uc_allocs;
2076 cache->uc_allocs = 0;
2077 zone->uz_frees += cache->uc_frees;
2078 cache->uc_frees = 0;
2079
2080 /* Our old one is now a free bucket */
2081 if (cache->uc_allocbucket) {
2082 KASSERT(cache->uc_allocbucket->ub_cnt == 0,
2083 ("uma_zalloc_arg: Freeing a non free bucket."));
2084 LIST_INSERT_HEAD(&zone->uz_free_bucket,
2085 cache->uc_allocbucket, ub_link);
2086 cache->uc_allocbucket = NULL;
2087 }
2088
2089 /* Check the free list for a new alloc bucket */
2090 if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
2091 KASSERT(bucket->ub_cnt != 0,
2092 ("uma_zalloc_arg: Returning an empty bucket."));
2093
2094 LIST_REMOVE(bucket, ub_link);
2095 cache->uc_allocbucket = bucket;
2096 ZONE_UNLOCK(zone);
2097 goto zalloc_start;
2098 }
2099 /* We are no longer associated with this CPU. */
2100 critical_exit();
2101
2102 	/* Bump up our uz_count so we get here less often */
2103 if (zone->uz_count < BUCKET_MAX)
2104 zone->uz_count++;
2105
2106 /*
2107 	 * Now let's just fill a bucket and put it on the free list. If that
2108 	 * works we'll restart the allocation from the beginning.
2109 */
2110 if (zone_alloc_bucket(zone, flags)) {
2111 ZONE_UNLOCK(zone);
2112 goto zalloc_restart;
2113 }
2114 ZONE_UNLOCK(zone);
2115 /*
2116 * We may not be able to get a bucket so return an actual item.
2117 */
2118 #ifdef UMA_DEBUG
2119 printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2120 #endif
2121
2122 item = zone_alloc_item(zone, udata, flags);
2123 return (item);
2124 }
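/*
 * Illustrative sketch (added; reuses the hypothetical example_zone from the
 * sketch above): callers normally use the uma_zalloc() wrapper, which is
 * uma_zalloc_arg() with a NULL udata, and pick M_WAITOK only in sleepable
 * contexts so the WITNESS_WARN() check above stays quiet.
 */
#ifdef UMA_USAGE_EXAMPLES
static struct example_obj *
example_obj_alloc(int how)
{

	/* M_ZERO makes the fast path above bzero() the item before return. */
	return (uma_zalloc(example_zone, how | M_ZERO));
}
#endif /* UMA_USAGE_EXAMPLES */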
2125
2126 static uma_slab_t
2127 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2128 {
2129 uma_slab_t slab;
2130
2131 mtx_assert(&keg->uk_lock, MA_OWNED);
2132 slab = NULL;
2133
2134 for (;;) {
2135 /*
2136 * Find a slab with some space. Prefer slabs that are partially
2137 * used over those that are totally full. This helps to reduce
2138 * fragmentation.
2139 */
2140 if (keg->uk_free != 0) {
2141 if (!LIST_EMPTY(&keg->uk_part_slab)) {
2142 slab = LIST_FIRST(&keg->uk_part_slab);
2143 } else {
2144 slab = LIST_FIRST(&keg->uk_free_slab);
2145 LIST_REMOVE(slab, us_link);
2146 LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2147 us_link);
2148 }
2149 MPASS(slab->us_keg == keg);
2150 return (slab);
2151 }
2152
2153 /*
2154 * M_NOVM means don't ask at all!
2155 */
2156 if (flags & M_NOVM)
2157 break;
2158
2159 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2160 keg->uk_flags |= UMA_ZFLAG_FULL;
2161 /*
2162 * If this is not a multi-zone, set the FULL bit.
2163 			 * Otherwise zone_fetch_slab_multi() takes care of it.
2164 */
2165 if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0)
2166 zone->uz_flags |= UMA_ZFLAG_FULL;
2167 if (flags & M_NOWAIT)
2168 break;
2169 zone->uz_sleeps++;
2170 msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2171 continue;
2172 }
2173 keg->uk_recurse++;
2174 slab = keg_alloc_slab(keg, zone, flags);
2175 keg->uk_recurse--;
2176 /*
2177 * If we got a slab here it's safe to mark it partially used
2178 * and return. We assume that the caller is going to remove
2179 * at least one item.
2180 */
2181 if (slab) {
2182 MPASS(slab->us_keg == keg);
2183 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2184 return (slab);
2185 }
2186 /*
2187 * We might not have been able to get a slab but another cpu
2188 * could have while we were unlocked. Check again before we
2189 * fail.
2190 */
2191 flags |= M_NOVM;
2192 }
2193 return (slab);
2194 }
2195
2196 static inline void
2197 zone_relock(uma_zone_t zone, uma_keg_t keg)
2198 {
2199 if (zone->uz_lock != &keg->uk_lock) {
2200 KEG_UNLOCK(keg);
2201 ZONE_LOCK(zone);
2202 }
2203 }
2204
2205 static inline void
2206 keg_relock(uma_keg_t keg, uma_zone_t zone)
2207 {
2208 if (zone->uz_lock != &keg->uk_lock) {
2209 ZONE_UNLOCK(zone);
2210 KEG_LOCK(keg);
2211 }
2212 }
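/*
 * Descriptive note (added): uz_lock aliases the first keg's uk_lock, so both
 * helpers above are no-ops in the common single-keg case; only the extra
 * kegs attached by uma_zsecond_add() have distinct locks, and then the
 * drop-and-retake means callers must not carry state protected by the
 * dropped lock across these calls.
 */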
2213
2214 static uma_slab_t
2215 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2216 {
2217 uma_slab_t slab;
2218
2219 if (keg == NULL)
2220 keg = zone_first_keg(zone);
2221 /*
2222 * This is to prevent us from recursively trying to allocate
2223 * buckets. The problem is that if an allocation forces us to
2224 * grab a new bucket we will call page_alloc, which will go off
2225 * and cause the vm to allocate vm_map_entries. If we need new
2226 * buckets there too we will recurse in kmem_alloc and bad
2227 * things happen. So instead we return a NULL bucket, and make
2228 * the code that allocates buckets smart enough to deal with it
2229 */
2230 if (keg->uk_flags & UMA_ZFLAG_BUCKET && keg->uk_recurse != 0)
2231 return (NULL);
2232
2233 for (;;) {
2234 slab = keg_fetch_slab(keg, zone, flags);
2235 if (slab)
2236 return (slab);
2237 if (flags & (M_NOWAIT | M_NOVM))
2238 break;
2239 }
2240 return (NULL);
2241 }
2242
2243 /*
2244  * zone_fetch_slab_multi: Fetches a slab from one available keg. Returns
2245 * with the keg locked. Caller must call zone_relock() afterwards if the
2246 * zone lock is required. On NULL the zone lock is held.
2247 *
2248 * The last pointer is used to seed the search. It is not required.
2249 */
2250 static uma_slab_t
2251 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2252 {
2253 uma_klink_t klink;
2254 uma_slab_t slab;
2255 uma_keg_t keg;
2256 int flags;
2257 int empty;
2258 int full;
2259
2260 /*
2261 * Don't wait on the first pass. This will skip limit tests
2262 * as well. We don't want to block if we can find a provider
2263 * without blocking.
2264 */
2265 flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2266 /*
2267 * Use the last slab allocated as a hint for where to start
2268 * the search.
2269 */
2270 if (last) {
2271 slab = keg_fetch_slab(last, zone, flags);
2272 if (slab)
2273 return (slab);
2274 zone_relock(zone, last);
2275 last = NULL;
2276 }
2277 /*
2278 	 * Loop until we have a slab in case of transient failures
2279 	 * while M_WAITOK is specified. I'm not sure this is 100%
2280 	 * required, but we've done it this way for a long time now.
2281 */
2282 for (;;) {
2283 empty = 0;
2284 full = 0;
2285 /*
2286 * Search the available kegs for slabs. Be careful to hold the
2287 * correct lock while calling into the keg layer.
2288 */
2289 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2290 keg = klink->kl_keg;
2291 keg_relock(keg, zone);
2292 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2293 slab = keg_fetch_slab(keg, zone, flags);
2294 if (slab)
2295 return (slab);
2296 }
2297 if (keg->uk_flags & UMA_ZFLAG_FULL)
2298 full++;
2299 else
2300 empty++;
2301 zone_relock(zone, keg);
2302 }
2303 if (rflags & (M_NOWAIT | M_NOVM))
2304 break;
2305 flags = rflags;
2306 /*
2307 * All kegs are full. XXX We can't atomically check all kegs
2308 * and sleep so just sleep for a short period and retry.
2309 */
2310 if (full && !empty) {
2311 zone->uz_flags |= UMA_ZFLAG_FULL;
2312 zone->uz_sleeps++;
2313 msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
2314 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2315 continue;
2316 }
2317 }
2318 return (NULL);
2319 }
2320
2321 static void *
2322 slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
2323 {
2324 uma_keg_t keg;
2325 uma_slabrefcnt_t slabref;
2326 void *item;
2327 u_int8_t freei;
2328
2329 keg = slab->us_keg;
2330 mtx_assert(&keg->uk_lock, MA_OWNED);
2331
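	/*
	 * Descriptive note (added): the slab free list is threaded through
	 * the item slots by index; us_firstfree names the next free slot and
	 * each slot's us_item field names the slot after that.
	 */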
2332 freei = slab->us_firstfree;
2333 if (keg->uk_flags & UMA_ZONE_REFCNT) {
2334 slabref = (uma_slabrefcnt_t)slab;
2335 slab->us_firstfree = slabref->us_freelist[freei].us_item;
2336 } else {
2337 slab->us_firstfree = slab->us_freelist[freei].us_item;
2338 }
2339 item = slab->us_data + (keg->uk_rsize * freei);
2340
2341 slab->us_freecount--;
2342 keg->uk_free--;
2343 #ifdef INVARIANTS
2344 uma_dbg_alloc(zone, slab, item);
2345 #endif
2346 /* Move this slab to the full list */
2347 if (slab->us_freecount == 0) {
2348 LIST_REMOVE(slab, us_link);
2349 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2350 }
2351
2352 return (item);
2353 }
2354
2355 static int
2356 zone_alloc_bucket(uma_zone_t zone, int flags)
2357 {
2358 uma_bucket_t bucket;
2359 uma_slab_t slab;
2360 uma_keg_t keg;
2361 int16_t saved;
2362 int max, origflags = flags;
2363
2364 /*
2365 * Try this zone's free list first so we don't allocate extra buckets.
2366 */
2367 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2368 KASSERT(bucket->ub_cnt == 0,
2369 ("zone_alloc_bucket: Bucket on free list is not empty."));
2370 LIST_REMOVE(bucket, ub_link);
2371 } else {
2372 int bflags;
2373
2374 bflags = (flags & ~M_ZERO);
2375 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2376 bflags |= M_NOVM;
2377
2378 ZONE_UNLOCK(zone);
2379 bucket = bucket_alloc(zone->uz_count, bflags);
2380 ZONE_LOCK(zone);
2381 }
2382
2383 if (bucket == NULL) {
2384 return (0);
2385 }
2386
2387 #ifdef SMP
2388 /*
2389 * This code is here to limit the number of simultaneous bucket fills
2390 	 * for any given zone to the number of per-CPU caches in this zone. This
2391 * is done so that we don't allocate more memory than we really need.
2392 */
2393 if (zone->uz_fills >= mp_ncpus)
2394 goto done;
2395
2396 #endif
2397 zone->uz_fills++;
2398
2399 max = MIN(bucket->ub_entries, zone->uz_count);
2400 /* Try to keep the buckets totally full */
2401 saved = bucket->ub_cnt;
2402 slab = NULL;
2403 keg = NULL;
2404 while (bucket->ub_cnt < max &&
2405 (slab = zone->uz_slab(zone, keg, flags)) != NULL) {
2406 keg = slab->us_keg;
2407 while (slab->us_freecount && bucket->ub_cnt < max) {
2408 bucket->ub_bucket[bucket->ub_cnt++] =
2409 slab_alloc_item(zone, slab);
2410 }
2411
2412 /* Don't block on the next fill */
2413 flags |= M_NOWAIT;
2414 }
2415 if (slab)
2416 zone_relock(zone, keg);
2417
2418 /*
2419 * We unlock here because we need to call the zone's init.
2420 * It should be safe to unlock because the slab dealt with
2421 * above is already on the appropriate list within the keg
2422 * and the bucket we filled is not yet on any list, so we
2423 * own it.
2424 */
2425 if (zone->uz_init != NULL) {
2426 int i;
2427
2428 ZONE_UNLOCK(zone);
2429 for (i = saved; i < bucket->ub_cnt; i++)
2430 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2431 origflags) != 0)
2432 break;
2433 /*
2434 * If we couldn't initialize the whole bucket, put the
2435 * rest back onto the freelist.
2436 */
2437 if (i != bucket->ub_cnt) {
2438 int j;
2439
2440 for (j = i; j < bucket->ub_cnt; j++) {
2441 zone_free_item(zone, bucket->ub_bucket[j],
2442 NULL, SKIP_FINI, 0);
2443 #ifdef INVARIANTS
2444 bucket->ub_bucket[j] = NULL;
2445 #endif
2446 }
2447 bucket->ub_cnt = i;
2448 }
2449 ZONE_LOCK(zone);
2450 }
2451
2452 zone->uz_fills--;
2453 if (bucket->ub_cnt != 0) {
2454 LIST_INSERT_HEAD(&zone->uz_full_bucket,
2455 bucket, ub_link);
2456 return (1);
2457 }
2458 #ifdef SMP
2459 done:
2460 #endif
2461 bucket_free(bucket);
2462
2463 return (0);
2464 }
2465 /*
2466 * Allocates an item for an internal zone
2467 *
2468 * Arguments
2469 * zone The zone to alloc for.
2470 * udata The data to be passed to the constructor.
2471 * flags M_WAITOK, M_NOWAIT, M_ZERO.
2472 *
2473 * Returns
2474 * NULL if there is no memory and M_NOWAIT is set
2475 * An item if successful
2476 */
2477
2478 static void *
2479 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2480 {
2481 uma_slab_t slab;
2482 void *item;
2483
2484 item = NULL;
2485
2486 #ifdef UMA_DEBUG_ALLOC
2487 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2488 #endif
2489 ZONE_LOCK(zone);
2490
2491 slab = zone->uz_slab(zone, NULL, flags);
2492 if (slab == NULL) {
2493 zone->uz_fails++;
2494 ZONE_UNLOCK(zone);
2495 return (NULL);
2496 }
2497
2498 item = slab_alloc_item(zone, slab);
2499
2500 zone_relock(zone, slab->us_keg);
2501 zone->uz_allocs++;
2502 ZONE_UNLOCK(zone);
2503
2504 /*
2505 * We have to call both the zone's init (not the keg's init)
2506 * and the zone's ctor. This is because the item is going from
2507 * a keg slab directly to the user, and the user is expecting it
2508 * to be both zone-init'd as well as zone-ctor'd.
2509 */
2510 if (zone->uz_init != NULL) {
2511 if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2512 zone_free_item(zone, item, udata, SKIP_FINI,
2513 ZFREE_STATFAIL | ZFREE_STATFREE);
2514 return (NULL);
2515 }
2516 }
2517 if (zone->uz_ctor != NULL) {
2518 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2519 zone_free_item(zone, item, udata, SKIP_DTOR,
2520 ZFREE_STATFAIL | ZFREE_STATFREE);
2521 return (NULL);
2522 }
2523 }
2524 if (flags & M_ZERO)
2525 bzero(item, zone->uz_size);
2526
2527 return (item);
2528 }
2529
2530 /* See uma.h */
2531 void
2532 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2533 {
2534 uma_cache_t cache;
2535 uma_bucket_t bucket;
2536 int bflags;
2537 int cpu;
2538
2539 #ifdef UMA_DEBUG_ALLOC_1
2540 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2541 #endif
2542 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2543 zone->uz_name);
2544
2545 /* uma_zfree(..., NULL) does nothing, to match free(9). */
2546 if (item == NULL)
2547 return;
2548
2549 if (zone->uz_dtor)
2550 zone->uz_dtor(item, zone->uz_size, udata);
2551
2552 #ifdef INVARIANTS
2553 ZONE_LOCK(zone);
2554 if (zone->uz_flags & UMA_ZONE_MALLOC)
2555 uma_dbg_free(zone, udata, item);
2556 else
2557 uma_dbg_free(zone, NULL, item);
2558 ZONE_UNLOCK(zone);
2559 #endif
2560 /*
2561 * The race here is acceptable. If we miss it we'll just have to wait
2562 * a little longer for the limits to be reset.
2563 */
2564 if (zone->uz_flags & UMA_ZFLAG_FULL)
2565 goto zfree_internal;
2566
2567 /*
2568 * If possible, free to the per-CPU cache. There are two
2569 * requirements for safe access to the per-CPU cache: (1) the thread
2570 * accessing the cache must not be preempted or yield during access,
2571 * and (2) the thread must not migrate CPUs without switching which
2572 * cache it accesses. We rely on a critical section to prevent
2573 * preemption and migration. We release the critical section in
2574 * order to acquire the zone mutex if we are unable to free to the
2575 * current cache; when we re-acquire the critical section, we must
2576 * detect and handle migration if it has occurred.
2577 */
2578 zfree_restart:
2579 critical_enter();
2580 cpu = curcpu;
2581 cache = &zone->uz_cpu[cpu];
2582
2583 zfree_start:
2584 bucket = cache->uc_freebucket;
2585
2586 if (bucket) {
2587 /*
2588 * Do we have room in our bucket? It is OK for this uz count
2589 * check to be slightly out of sync.
2590 */
2591
2592 if (bucket->ub_cnt < bucket->ub_entries) {
2593 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2594 ("uma_zfree: Freeing to non free bucket index."));
2595 bucket->ub_bucket[bucket->ub_cnt] = item;
2596 bucket->ub_cnt++;
2597 cache->uc_frees++;
2598 critical_exit();
2599 return;
2600 } else if (cache->uc_allocbucket) {
2601 #ifdef UMA_DEBUG_ALLOC
2602 printf("uma_zfree: Swapping buckets.\n");
2603 #endif
2604 /*
2605 * We have run out of space in our freebucket.
2606 * See if we can switch with our alloc bucket.
2607 */
2608 if (cache->uc_allocbucket->ub_cnt <
2609 cache->uc_freebucket->ub_cnt) {
2610 bucket = cache->uc_freebucket;
2611 cache->uc_freebucket = cache->uc_allocbucket;
2612 cache->uc_allocbucket = bucket;
2613 goto zfree_start;
2614 }
2615 }
2616 }
2617 /*
2618 * We can get here for two reasons:
2619 *
2620 * 1) The buckets are NULL
2621 * 2) The alloc and free buckets are both somewhat full.
2622 *
2623 	 * We must go back to the zone, which requires acquiring the zone lock,
2624 * which in turn means we must release and re-acquire the critical
2625 * section. Since the critical section is released, we may be
2626 * preempted or migrate. As such, make sure not to maintain any
2627 * thread-local state specific to the cache from prior to releasing
2628 * the critical section.
2629 */
2630 critical_exit();
2631 ZONE_LOCK(zone);
2632 critical_enter();
2633 cpu = curcpu;
2634 cache = &zone->uz_cpu[cpu];
2635 if (cache->uc_freebucket != NULL) {
2636 if (cache->uc_freebucket->ub_cnt <
2637 cache->uc_freebucket->ub_entries) {
2638 ZONE_UNLOCK(zone);
2639 goto zfree_start;
2640 }
2641 if (cache->uc_allocbucket != NULL &&
2642 (cache->uc_allocbucket->ub_cnt <
2643 cache->uc_freebucket->ub_cnt)) {
2644 ZONE_UNLOCK(zone);
2645 goto zfree_start;
2646 }
2647 }
2648
2649 /* Since we have locked the zone we may as well send back our stats */
2650 zone->uz_allocs += cache->uc_allocs;
2651 cache->uc_allocs = 0;
2652 zone->uz_frees += cache->uc_frees;
2653 cache->uc_frees = 0;
2654
2655 bucket = cache->uc_freebucket;
2656 cache->uc_freebucket = NULL;
2657
2658 /* Can we throw this on the zone full list? */
2659 if (bucket != NULL) {
2660 #ifdef UMA_DEBUG_ALLOC
2661 printf("uma_zfree: Putting old bucket on the free list.\n");
2662 #endif
2663 /* ub_cnt is pointing to the last free item */
2664 KASSERT(bucket->ub_cnt != 0,
2665 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2666 LIST_INSERT_HEAD(&zone->uz_full_bucket,
2667 bucket, ub_link);
2668 }
2669 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2670 LIST_REMOVE(bucket, ub_link);
2671 ZONE_UNLOCK(zone);
2672 cache->uc_freebucket = bucket;
2673 goto zfree_start;
2674 }
2675 /* We are no longer associated with this CPU. */
2676 critical_exit();
2677
2678 /* And the zone.. */
2679 ZONE_UNLOCK(zone);
2680
2681 #ifdef UMA_DEBUG_ALLOC
2682 printf("uma_zfree: Allocating new free bucket.\n");
2683 #endif
2684 bflags = M_NOWAIT;
2685
2686 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2687 bflags |= M_NOVM;
2688 bucket = bucket_alloc(zone->uz_count, bflags);
2689 if (bucket) {
2690 ZONE_LOCK(zone);
2691 LIST_INSERT_HEAD(&zone->uz_free_bucket,
2692 bucket, ub_link);
2693 ZONE_UNLOCK(zone);
2694 goto zfree_restart;
2695 }
2696
2697 /*
2698 * If nothing else caught this, we'll just do an internal free.
2699 */
2700 zfree_internal:
2701 zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
2702
2703 return;
2704 }
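/*
 * Illustrative sketch (added; hypothetical names as above): the matching
 * teardown path. uma_zfree() is uma_zfree_arg() with a NULL udata, and a
 * NULL item is tolerated above to match free(9).
 */
#ifdef UMA_USAGE_EXAMPLES
static void
example_obj_free(struct example_obj *obj)
{

	uma_zfree(example_zone, obj);	/* obj may be NULL */
}
#endif /* UMA_USAGE_EXAMPLES */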
2705
2706 /*
2707 * Frees an item to an INTERNAL zone or allocates a free bucket
2708 *
2709 * Arguments:
2710 * zone The zone to free to
2711 * item The item we're freeing
2712 * udata User supplied data for the dtor
2713 * skip Skip dtors and finis
2714 */
2715 static void
2716 zone_free_item(uma_zone_t zone, void *item, void *udata,
2717 enum zfreeskip skip, int flags)
2718 {
2719 uma_slab_t slab;
2720 uma_slabrefcnt_t slabref;
2721 uma_keg_t keg;
2722 u_int8_t *mem;
2723 u_int8_t freei;
2724 int clearfull;
2725
2726 if (skip < SKIP_DTOR && zone->uz_dtor)
2727 zone->uz_dtor(item, zone->uz_size, udata);
2728
2729 if (skip < SKIP_FINI && zone->uz_fini)
2730 zone->uz_fini(item, zone->uz_size);
2731
2732 ZONE_LOCK(zone);
2733
2734 if (flags & ZFREE_STATFAIL)
2735 zone->uz_fails++;
2736 if (flags & ZFREE_STATFREE)
2737 zone->uz_frees++;
2738
2739 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2740 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
2741 keg = zone_first_keg(zone); /* Must only be one. */
2742 if (zone->uz_flags & UMA_ZONE_HASH) {
2743 slab = hash_sfind(&keg->uk_hash, mem);
2744 } else {
2745 mem += keg->uk_pgoff;
2746 slab = (uma_slab_t)mem;
2747 }
2748 } else {
2749 /* This prevents redundant lookups via free(). */
2750 if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL)
2751 slab = (uma_slab_t)udata;
2752 else
2753 slab = vtoslab((vm_offset_t)item);
2754 keg = slab->us_keg;
2755 keg_relock(keg, zone);
2756 }
2757 MPASS(keg == slab->us_keg);
2758
2759 /* Do we need to remove from any lists? */
2760 if (slab->us_freecount+1 == keg->uk_ipers) {
2761 LIST_REMOVE(slab, us_link);
2762 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2763 } else if (slab->us_freecount == 0) {
2764 LIST_REMOVE(slab, us_link);
2765 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2766 }
2767
2768 	/* Return the item's slot to the slab's embedded free list. */
2769 freei = ((unsigned long)item - (unsigned long)slab->us_data)
2770 / keg->uk_rsize;
2771
2772 #ifdef INVARIANTS
2773 if (!skip)
2774 uma_dbg_free(zone, slab, item);
2775 #endif
2776
2777 if (keg->uk_flags & UMA_ZONE_REFCNT) {
2778 slabref = (uma_slabrefcnt_t)slab;
2779 slabref->us_freelist[freei].us_item = slab->us_firstfree;
2780 } else {
2781 slab->us_freelist[freei].us_item = slab->us_firstfree;
2782 }
2783 slab->us_firstfree = freei;
2784 slab->us_freecount++;
2785
2786 /* Zone statistics */
2787 keg->uk_free++;
2788
2789 clearfull = 0;
2790 if (keg->uk_flags & UMA_ZFLAG_FULL) {
2791 if (keg->uk_pages < keg->uk_maxpages) {
2792 keg->uk_flags &= ~UMA_ZFLAG_FULL;
2793 clearfull = 1;
2794 }
2795
2796 /*
2797 * We can handle one more allocation. Since we're clearing ZFLAG_FULL,
2798 * wake up all procs blocked on pages. This should be uncommon, so
2799 * keeping this simple for now (rather than adding count of blocked
2800 * threads etc).
2801 */
2802 wakeup(keg);
2803 }
2804 if (clearfull) {
2805 zone_relock(zone, keg);
2806 zone->uz_flags &= ~UMA_ZFLAG_FULL;
2807 wakeup(zone);
2808 ZONE_UNLOCK(zone);
2809 } else
2810 KEG_UNLOCK(keg);
2811 }
2812
2813 /* See uma.h */
2814 int
2815 uma_zone_set_max(uma_zone_t zone, int nitems)
2816 {
2817 uma_keg_t keg;
2818
2819 ZONE_LOCK(zone);
2820 keg = zone_first_keg(zone);
2821 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2822 if (keg->uk_maxpages * keg->uk_ipers < nitems)
2823 keg->uk_maxpages += keg->uk_ppera;
2824 nitems = keg->uk_maxpages * keg->uk_ipers;
2825 ZONE_UNLOCK(zone);
2826
2827 return (nitems);
2828 }
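/*
 * Illustrative note (added; hypothetical numbers): the cap is kept in whole
 * slabs, so the effective limit returned may exceed the request. With
 * uk_ipers = 10 and uk_ppera = 1, uma_zone_set_max(zone, 95) rounds up to
 * maxpages = 10 and returns 100.
 */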
2829
2830 /* See uma.h */
2831 int
2832 uma_zone_get_max(uma_zone_t zone)
2833 {
2834 int nitems;
2835 uma_keg_t keg;
2836
2837 ZONE_LOCK(zone);
2838 keg = zone_first_keg(zone);
2839 nitems = keg->uk_maxpages * keg->uk_ipers;
2840 ZONE_UNLOCK(zone);
2841
2842 return (nitems);
2843 }
2844
2845 /* See uma.h */
2846 int
2847 uma_zone_get_cur(uma_zone_t zone)
2848 {
2849 int64_t nitems;
2850 u_int i;
2851
2852 ZONE_LOCK(zone);
2853 nitems = zone->uz_allocs - zone->uz_frees;
2854 CPU_FOREACH(i) {
2855 /*
2856 * See the comment in sysctl_vm_zone_stats() regarding the
2857 * safety of accessing the per-cpu caches. With the zone lock
2858 * held, it is safe, but can potentially result in stale data.
2859 */
2860 nitems += zone->uz_cpu[i].uc_allocs -
2861 zone->uz_cpu[i].uc_frees;
2862 }
2863 ZONE_UNLOCK(zone);
2864
2865 return (nitems < 0 ? 0 : nitems);
2866 }
2867
2868 /* See uma.h */
2869 void
2870 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2871 {
2872 uma_keg_t keg;
2873
2874 ZONE_LOCK(zone);
2875 keg = zone_first_keg(zone);
2876 KASSERT(keg->uk_pages == 0,
2877 ("uma_zone_set_init on non-empty keg"));
2878 keg->uk_init = uminit;
2879 ZONE_UNLOCK(zone);
2880 }
2881
2882 /* See uma.h */
2883 void
2884 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2885 {
2886 uma_keg_t keg;
2887
2888 ZONE_LOCK(zone);
2889 keg = zone_first_keg(zone);
2890 KASSERT(keg->uk_pages == 0,
2891 ("uma_zone_set_fini on non-empty keg"));
2892 keg->uk_fini = fini;
2893 ZONE_UNLOCK(zone);
2894 }
2895
2896 /* See uma.h */
2897 void
2898 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2899 {
2900 ZONE_LOCK(zone);
2901 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2902 ("uma_zone_set_zinit on non-empty keg"));
2903 zone->uz_init = zinit;
2904 ZONE_UNLOCK(zone);
2905 }
2906
2907 /* See uma.h */
2908 void
2909 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2910 {
2911 ZONE_LOCK(zone);
2912 KASSERT(zone_first_keg(zone)->uk_pages == 0,
2913 ("uma_zone_set_zfini on non-empty keg"));
2914 zone->uz_fini = zfini;
2915 ZONE_UNLOCK(zone);
2916 }
2917
2918 /* See uma.h */
2919 /* XXX uk_freef is not actually used with the zone locked */
2920 void
2921 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2922 {
2923
2924 ZONE_LOCK(zone);
2925 zone_first_keg(zone)->uk_freef = freef;
2926 ZONE_UNLOCK(zone);
2927 }
2928
2929 /* See uma.h */
2930 /* XXX uk_allocf is not actually used with the zone locked */
2931 void
2932 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2933 {
2934 uma_keg_t keg;
2935
2936 ZONE_LOCK(zone);
2937 keg = zone_first_keg(zone);
2938 keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2939 keg->uk_allocf = allocf;
2940 ZONE_UNLOCK(zone);
2941 }
2942
2943 /* See uma.h */
2944 int
2945 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
2946 {
2947 uma_keg_t keg;
2948 vm_offset_t kva;
2949 int pages;
2950
2951 keg = zone_first_keg(zone);
2952 pages = count / keg->uk_ipers;
2953
2954 if (pages * keg->uk_ipers < count)
2955 pages++;
2956
2957 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2958
2959 if (kva == 0)
2960 return (0);
2961 if (obj == NULL)
2962 obj = vm_object_allocate(OBJT_PHYS, pages);
2963 else {
2964 VM_OBJECT_LOCK_INIT(obj, "uma object");
2965 _vm_object_allocate(OBJT_PHYS, pages, obj);
2966 }
2967 ZONE_LOCK(zone);
2968 keg->uk_kva = kva;
2969 keg->uk_obj = obj;
2970 keg->uk_maxpages = pages;
2971 keg->uk_allocf = obj_alloc;
2972 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2973 ZONE_UNLOCK(zone);
2974 return (1);
2975 }
2976
2977 /* See uma.h */
2978 void
2979 uma_prealloc(uma_zone_t zone, int items)
2980 {
2981 int slabs;
2982 uma_slab_t slab;
2983 uma_keg_t keg;
2984
2985 keg = zone_first_keg(zone);
2986 ZONE_LOCK(zone);
2987 slabs = items / keg->uk_ipers;
2988 if (slabs * keg->uk_ipers < items)
2989 slabs++;
2990 while (slabs > 0) {
2991 slab = keg_alloc_slab(keg, zone, M_WAITOK);
2992 if (slab == NULL)
2993 break;
2994 MPASS(slab->us_keg == keg);
2995 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2996 slabs--;
2997 }
2998 ZONE_UNLOCK(zone);
2999 }
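/*
 * Illustrative sketch (added; hypothetical names): pre-populating a zone's
 * free slab list with M_WAITOK slab allocations so that early consumers
 * cannot fail; commonly paired with uma_zone_set_max() to pin a fixed-size
 * reserve.
 */
#ifdef UMA_USAGE_EXAMPLES
static void
example_reserve(void)
{

	uma_zone_set_max(example_zone, 256);
	uma_prealloc(example_zone, 256);
}
#endif /* UMA_USAGE_EXAMPLES */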
3000
3001 /* See uma.h */
3002 u_int32_t *
3003 uma_find_refcnt(uma_zone_t zone, void *item)
3004 {
3005 uma_slabrefcnt_t slabref;
3006 uma_keg_t keg;
3007 u_int32_t *refcnt;
3008 int idx;
3009
3010 slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
3011 (~UMA_SLAB_MASK));
3012 	KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
3013 	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
3014 	keg = slabref->us_keg;
3015 idx = ((unsigned long)item - (unsigned long)slabref->us_data)
3016 / keg->uk_rsize;
3017 refcnt = &slabref->us_freelist[idx].us_refcnt;
3018 	return (refcnt);
3019 }
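/*
 * Illustrative sketch (added; assumes <machine/atomic.h> for atomic(9)):
 * UMA_ZONE_REFCNT zones keep a 32-bit reference count per item in the
 * slab's freelist entries, which consumers such as the mbuf external-storage
 * code look up through uma_find_refcnt() and then manipulate atomically.
 */
#ifdef UMA_USAGE_EXAMPLES
static void
example_ref_acquire(uma_zone_t refzone, void *item)
{
	u_int32_t *cnt;

	cnt = uma_find_refcnt(refzone, item);
	atomic_add_32(cnt, 1);
}
#endif /* UMA_USAGE_EXAMPLES */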
3020
3021 /* See uma.h */
3022 void
3023 uma_reclaim(void)
3024 {
3025 #ifdef UMA_DEBUG
3026 printf("UMA: vm asked us to release pages!\n");
3027 #endif
3028 bucket_enable();
3029 zone_foreach(zone_drain);
3030 /*
3031 	 * Some slabs may have been freed, but the slab zones are drained early
3032 	 * in the pass above, so drain them again to free pages that became
3033 	 * empty once the other zones were drained. We have to do the same for
3034 	 * buckets.
3034 */
3035 zone_drain(slabzone);
3036 zone_drain(slabrefzone);
3037 bucket_zone_drain();
3038 }
3039
3040 /* See uma.h */
3041 int
3042 uma_zone_exhausted(uma_zone_t zone)
3043 {
3044 int full;
3045
3046 ZONE_LOCK(zone);
3047 full = (zone->uz_flags & UMA_ZFLAG_FULL);
3048 ZONE_UNLOCK(zone);
3049 return (full);
3050 }
3051
3052 int
3053 uma_zone_exhausted_nolock(uma_zone_t zone)
3054 {
3055 return (zone->uz_flags & UMA_ZFLAG_FULL);
3056 }
3057
3058 void *
3059 uma_large_malloc(int size, int wait)
3060 {
3061 void *mem;
3062 uma_slab_t slab;
3063 u_int8_t flags;
3064
3065 slab = zone_alloc_item(slabzone, NULL, wait);
3066 if (slab == NULL)
3067 return (NULL);
3068 mem = page_alloc(NULL, size, &flags, wait);
3069 if (mem) {
3070 vsetslab((vm_offset_t)mem, slab);
3071 slab->us_data = mem;
3072 slab->us_flags = flags | UMA_SLAB_MALLOC;
3073 slab->us_size = size;
3074 } else {
3075 zone_free_item(slabzone, slab, NULL, SKIP_NONE,
3076 ZFREE_STATFAIL | ZFREE_STATFREE);
3077 }
3078
3079 return (mem);
3080 }
3081
3082 void
3083 uma_large_free(uma_slab_t slab)
3084 {
3085 vsetobj((vm_offset_t)slab->us_data, kmem_object);
3086 page_free(slab->us_data, slab->us_size, slab->us_flags);
3087 zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
3088 }
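/*
 * Descriptive note (added): these two helpers back malloc(9) requests too
 * large for the fixed-size malloc zones; the slab records the size and page
 * flags so that free(9) can hand the pages straight back to page_free()
 * above.
 */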
3089
3090 void
3091 uma_print_stats(void)
3092 {
3093 zone_foreach(uma_print_zone);
3094 }
3095
3096 static void
3097 slab_print(uma_slab_t slab)
3098 {
3099 printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
3100 slab->us_keg, slab->us_data, slab->us_freecount,
3101 slab->us_firstfree);
3102 }
3103
3104 static void
3105 cache_print(uma_cache_t cache)
3106 {
3107 printf("alloc: %p(%d), free: %p(%d)\n",
3108 cache->uc_allocbucket,
3109 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3110 cache->uc_freebucket,
3111 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3112 }
3113
3114 static void
3115 uma_print_keg(uma_keg_t keg)
3116 {
3117 uma_slab_t slab;
3118
3119 printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3120 "out %d free %d limit %d\n",
3121 keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3122 keg->uk_ipers, keg->uk_ppera,
3123 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
3124 (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3125 printf("Part slabs:\n");
3126 LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3127 slab_print(slab);
3128 printf("Free slabs:\n");
3129 LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3130 slab_print(slab);
3131 printf("Full slabs:\n");
3132 LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3133 slab_print(slab);
3134 }
3135
3136 void
3137 uma_print_zone(uma_zone_t zone)
3138 {
3139 uma_cache_t cache;
3140 uma_klink_t kl;
3141 int i;
3142
3143 printf("zone: %s(%p) size %d flags %#x\n",
3144 zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3145 LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3146 uma_print_keg(kl->kl_keg);
3147 CPU_FOREACH(i) {
3148 cache = &zone->uz_cpu[i];
3149 printf("CPU %d Cache:\n", i);
3150 cache_print(cache);
3151 }
3152 }
3153
3154 #ifdef DDB
3155 /*
3156  * Generate statistics across both the zone and its per-CPU caches. Return
3157 * desired statistics if the pointer is non-NULL for that statistic.
3158 *
3159 * Note: does not update the zone statistics, as it can't safely clear the
3160 * per-CPU cache statistic.
3161 *
3162 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3163 * safe from off-CPU; we should modify the caches to track this information
3164 * directly so that we don't have to.
3165 */
3166 static void
3167 uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
3168 u_int64_t *freesp, u_int64_t *sleepsp)
3169 {
3170 uma_cache_t cache;
3171 u_int64_t allocs, frees, sleeps;
3172 int cachefree, cpu;
3173
3174 allocs = frees = sleeps = 0;
3175 cachefree = 0;
3176 CPU_FOREACH(cpu) {
3177 cache = &z->uz_cpu[cpu];
3178 if (cache->uc_allocbucket != NULL)
3179 cachefree += cache->uc_allocbucket->ub_cnt;
3180 if (cache->uc_freebucket != NULL)
3181 cachefree += cache->uc_freebucket->ub_cnt;
3182 allocs += cache->uc_allocs;
3183 frees += cache->uc_frees;
3184 }
3185 allocs += z->uz_allocs;
3186 frees += z->uz_frees;
3187 sleeps += z->uz_sleeps;
3188 if (cachefreep != NULL)
3189 *cachefreep = cachefree;
3190 if (allocsp != NULL)
3191 *allocsp = allocs;
3192 if (freesp != NULL)
3193 *freesp = frees;
3194 if (sleepsp != NULL)
3195 *sleepsp = sleeps;
3196 }
3197 #endif /* DDB */
3198
3199 static int
3200 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3201 {
3202 uma_keg_t kz;
3203 uma_zone_t z;
3204 int count;
3205
3206 count = 0;
3207 mtx_lock(&uma_mtx);
3208 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3209 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3210 count++;
3211 }
3212 mtx_unlock(&uma_mtx);
3213 return (sysctl_handle_int(oidp, &count, 0, req));
3214 }
3215
3216 static int
3217 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3218 {
3219 struct uma_stream_header ush;
3220 struct uma_type_header uth;
3221 struct uma_percpu_stat ups;
3222 uma_bucket_t bucket;
3223 struct sbuf sbuf;
3224 uma_cache_t cache;
3225 uma_klink_t kl;
3226 uma_keg_t kz;
3227 uma_zone_t z;
3228 uma_keg_t k;
3229 int count, error, i;
3230
3231 error = sysctl_wire_old_buffer(req, 0);
3232 if (error != 0)
3233 return (error);
3234 sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3235
3236 count = 0;
3237 mtx_lock(&uma_mtx);
3238 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3239 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3240 count++;
3241 }
3242
3243 /*
3244 * Insert stream header.
3245 */
3246 bzero(&ush, sizeof(ush));
3247 ush.ush_version = UMA_STREAM_VERSION;
3248 ush.ush_maxcpus = (mp_maxid + 1);
3249 ush.ush_count = count;
3250 (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3251
3252 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3253 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3254 bzero(&uth, sizeof(uth));
3255 ZONE_LOCK(z);
3256 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3257 uth.uth_align = kz->uk_align;
3258 uth.uth_size = kz->uk_size;
3259 uth.uth_rsize = kz->uk_rsize;
3260 LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3261 k = kl->kl_keg;
3262 uth.uth_maxpages += k->uk_maxpages;
3263 uth.uth_pages += k->uk_pages;
3264 uth.uth_keg_free += k->uk_free;
3265 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3266 * k->uk_ipers;
3267 }
3268
3269 /*
3270 			 * A zone is secondary if it is not the first entry
3271 * on the keg's zone list.
3272 */
3273 if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3274 (LIST_FIRST(&kz->uk_zones) != z))
3275 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3276
3277 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
3278 uth.uth_zone_free += bucket->ub_cnt;
3279 uth.uth_allocs = z->uz_allocs;
3280 uth.uth_frees = z->uz_frees;
3281 uth.uth_fails = z->uz_fails;
3282 uth.uth_sleeps = z->uz_sleeps;
3283 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3284 /*
3285 * While it is not normally safe to access the cache
3286 * bucket pointers while not on the CPU that owns the
3287 * cache, we only allow the pointers to be exchanged
3288 * without the zone lock held, not invalidated, so
3289 * accept the possible race associated with bucket
3290 * exchange during monitoring.
3291 */
3292 for (i = 0; i < (mp_maxid + 1); i++) {
3293 bzero(&ups, sizeof(ups));
3294 if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3295 goto skip;
3296 if (CPU_ABSENT(i))
3297 goto skip;
3298 cache = &z->uz_cpu[i];
3299 if (cache->uc_allocbucket != NULL)
3300 ups.ups_cache_free +=
3301 cache->uc_allocbucket->ub_cnt;
3302 if (cache->uc_freebucket != NULL)
3303 ups.ups_cache_free +=
3304 cache->uc_freebucket->ub_cnt;
3305 ups.ups_allocs = cache->uc_allocs;
3306 ups.ups_frees = cache->uc_frees;
3307 skip:
3308 (void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3309 }
3310 ZONE_UNLOCK(z);
3311 }
3312 }
3313 mtx_unlock(&uma_mtx);
3314 error = sbuf_finish(&sbuf);
3315 sbuf_delete(&sbuf);
3316 return (error);
3317 }
3318
3319 #ifdef DDB
3320 DB_SHOW_COMMAND(uma, db_show_uma)
3321 {
3322 u_int64_t allocs, frees, sleeps;
3323 uma_bucket_t bucket;
3324 uma_keg_t kz;
3325 uma_zone_t z;
3326 int cachefree;
3327
3328 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3329 "Requests", "Sleeps");
3330 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3331 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3332 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3333 allocs = z->uz_allocs;
3334 frees = z->uz_frees;
3335 sleeps = z->uz_sleeps;
3336 cachefree = 0;
3337 } else
3338 uma_zone_sumstat(z, &cachefree, &allocs,
3339 &frees, &sleeps);
3340 if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3341 (LIST_FIRST(&kz->uk_zones) != z)))
3342 cachefree += kz->uk_free;
3343 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
3344 cachefree += bucket->ub_cnt;
3345 db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name,
3346 (uintmax_t)kz->uk_size,
3347 (intmax_t)(allocs - frees), cachefree,
3348 (uintmax_t)allocs, sleeps);
3349 if (db_pager_quit)
3350 return;
3351 }
3352 }
3353 }
3354 #endif