sys/vm/uma_int.h
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: releng/12.0/sys/vm/uma_int.h 340011 2018-11-01 17:36:42Z markj $
 *
 */

#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * A brief summary: Zones describe unique allocation types. Zones are
 * organized into per-CPU caches which are filled by buckets. Buckets are
 * organized according to memory domains. Buckets are filled from kegs which
 * are also organized according to memory domains. Kegs describe a unique
 * allocation type, backend memory provider, and layout. Kegs are associated
 * with one or more zones and zones reference one or more kegs. Kegs provide
 * slabs which are virtually contiguous collections of pages. Each slab is
 * broken down into one or more items that will satisfy an individual
 * allocation.
 *
 * Allocation is satisfied in the following order:
 * 1) Per-CPU cache
 * 2) Per-domain cache of buckets
 * 3) Slab from any of N kegs
 * 4) Backend page provider
 *
 * More detail on individual objects is contained below:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation. They also contain
 * the user-supplied size of each item, which is adjusted for alignment; the
 * adjusted result is stored in rsize. The keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * Keg slab lists are organized by memory domain to support NUMA allocation
 * policies. By default allocations are spread across domains to reduce the
 * potential for hotspots. Special keg creation flags may be specified to
 * prefer local allocation. However there is no strict enforcement, as frees
 * may happen on any CPU and items are returned to the CPU-local cache
 * regardless of the originating domain.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone. The free list within a
 * slab is managed with a bitmask. For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size. You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power-of-two allocator. I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations in this range,
 * so at this time it may not make sense to optimize for it. This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones, but by far most of the time they serve only
 * one. When a Zone is created, a Keg is allocated and set up for it. While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs. Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs). The backing Keg typically serves only one Zone, but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are done in the Master Zone. For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
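
/*
 * A minimal sketch of the allocation order described above. The helper
 * names here are invented for illustration; the real fast path lives in
 * uma_core.c and is considerably more involved.
 */
#if 0
static void *
zalloc_sketch(uma_zone_t zone, int domain)
{
	void *item;

	/* 1) Per-CPU cache: pop from the CPU-local alloc bucket. */
	if ((item = cpu_cache_pop(zone)) != NULL)
		return (item);
	/* 2) Per-domain cache: grab a full bucket for this CPU. */
	if ((item = domain_bucket_pop(zone, domain)) != NULL)
		return (item);
	/* 3) Slab layer: carve an item from one of the kegs' slabs. */
	if ((item = keg_slab_alloc(zone, domain)) != NULL)
		return (item);
	/* 4) Backend: ask the page provider for fresh slabs. */
	return (backend_alloc(zone, domain));
}
#endif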

/*
 * This is the layout of a normal (non-OFFPAGE) slab:
 *
 * i == item
 * s == slab pointer
 *
 * <----------------  Page (UMA_SLAB_SIZE) ------------------>
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 * |___________________________________________________________|
 *
 *
 * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE.
 *
 * ___________________________________________________________
 * | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 * |___________________________________________________________|
 * ___________    ^
 * |slab header|   |
 * |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in UMA_SLAB_MASK */

/* Max waste percentage before going to off-page slab management */
#define UMA_MAX_WASTE	10

/*
 * Size of memory available for items in a slab with an embedded
 * (non-OFFPAGE) slab header.
 */
#define UMA_SLAB_SPACE	(UMA_SLAB_SIZE - sizeof(struct uma_slab))
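
/*
 * Illustrative sketch, loosely modeled on keg_small_init() in uma_core.c,
 * of how the 10% waste rule above might be applied; the names and the
 * exact comparison are simplifications for this example.
 */
#if 0
static bool
prefer_offpage_sketch(u_int rsize)
{
	u_int ipers, ipers_off, waste;

	ipers = UMA_SLAB_SPACE / rsize;		/* with embedded header */
	ipers_off = UMA_SLAB_SIZE / rsize;	/* with off-page header */
	waste = UMA_SLAB_SPACE - ipers * rsize;

	/* Go off-page only if waste exceeds 10% and it fits more items. */
	return (waste > UMA_SLAB_SIZE / UMA_MAX_WASTE && ipers_off > ipers);
}
#endif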

/*
 * I doubt there will be many cases where this is exceeded. This is the
 * initial size of the hash table for uma_slabs that are managed off page.
 * This hash does expand by powers of two. Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms. This should yield a low
 * number of collisions if the pages are relatively contiguous.
 */

#define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
	SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)					\
	SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),			\
	    (mem))], (s), uma_slab, us_hlink)
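
/*
 * Worked example (assuming 4 KB pages, so UMA_SLAB_SHIFT == 12, and the
 * initial 32-bucket table, so uh_hashmask == 31): a page at
 * 0xfffff80012345000 hashes as (0xfffff80012345000 >> 12) & 31 == 5.
 * Because the low bits of the page number select the bucket, adjacent
 * pages land in adjacent buckets, which keeps collisions low when slab
 * pages are roughly contiguous.
 */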

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Align field or structure to cache line.
 */
#if defined(__amd64__) || defined(__powerpc64__)
#define UMA_ALIGN	__aligned(128)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of items in bucket. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	uint64_t	uc_allocs;	/* Count of allocations */
	uint64_t	uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;
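
/*
 * Sketch, condensed from the uma_zalloc_arg() fast path in uma_core.c, of
 * how the per-CPU cache is consumed: a critical section pins the thread to
 * its CPU, so the CPU-private buckets need no lock.
 */
#if 0
static void *
cache_alloc_sketch(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t bucket;
	void *item = NULL;

	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	bucket = cache->uc_allocbucket;
	if (bucket != NULL && bucket->ub_cnt > 0) {
		item = bucket->ub_bucket[--bucket->ub_cnt];
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
		cache->uc_allocs++;
	}
	critical_exit();
	return (item);	/* NULL falls through to the slower paths. */
}
#endif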

/*
 * Per-domain memory list. Embedded in the kegs.
 */
struct uma_domain {
	LIST_HEAD(,uma_slab)	ud_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	ud_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	ud_full_slab;	/* full slabs */
};

typedef struct uma_domain * uma_domain_t;

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct mtx	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */

	struct domainset_ref	uk_dr;	/* Domain selection policy. */
	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_pages;	/* Total page count */
	uint32_t	uk_free;	/* Count of items free in slabs */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */
	uint32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;	/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	/* Must be last, variable sized. */
	struct uma_domain	uk_domain[];	/* Keg's slab lists. */
};
typedef struct uma_keg * uma_keg_t;

/*
 * Free bits per-slab.
 */
#define SLAB_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
BITSET_DEFINE(slabbits, SLAB_SETSIZE);

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	uint8_t		*us_data;		/* First item */
	struct slabbits	us_free;		/* Free bitmask. */
#ifdef INVARIANTS
	struct slabbits	us_debugfree;		/* Debug bitmask. */
#endif
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_domain;		/* Backing NUMA domain. */
};

#define us_link	us_type._us_link
#define us_size	us_type._us_size
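
/*
 * Sketch, modeled on the slab item handling in uma_core.c, of how the
 * us_free bitmask tracks items. BIT_FFS() (from sys/bitset.h) returns the
 * 1-based index of the first set (free) bit, or 0 if the slab is full.
 */
#if 0
static void *
slab_item_sketch(uma_keg_t keg, uma_slab_t slab)
{
	uint8_t *item;
	int freei;

	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
	if (freei < 0)
		return (NULL);			/* No free items. */
	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
	item = slab->us_data + (keg->uk_rsize * freei);
	slab->us_freecount--;
	return (item);
}
#endif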

#if MAXMEMDOM >= 255
#error "Slab domain type insufficient"
#endif

typedef struct uma_slab * uma_slab_t;
typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int, int);

struct uma_klink {
	LIST_ENTRY(uma_klink)	kl_link;
	uma_keg_t		kl_keg;
};
typedef struct uma_klink *uma_klink_t;

struct uma_zone_domain {
	LIST_HEAD(,uma_bucket)	uzd_buckets;	/* full buckets */
};

typedef struct uma_zone_domain * uma_zone_domain_t;

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	/* Offset 0, used in alloc/free fast/medium fast path and const. */
	struct mtx	*uz_lockptr;
	const char	*uz_name;	/* Text name of the zone */
	struct uma_zone_domain	*uz_domain;	/* per-domain buckets */
	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */

	/* Offset 64, used in bucket replenish. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */
	uma_slaballoc	uz_slab;	/* Allocate a slab from the backend. */
	uint16_t	uz_count;	/* Number of items in a full bucket */
	uint16_t	uz_count_min;	/* Minimum number of items in a bucket */
	/* 32bit pad on 64bit. */
	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */

	/* Offset 128 Rare. */
	/*
	 * The lock is placed here to avoid adjacent line prefetcher
	 * in fast paths and to take up space near infrequently accessed
	 * members to reduce alignment overhead.
	 */
	struct mtx	uz_lock;	/* Lock for the zone */
	struct uma_klink	uz_klink;	/* klink for first keg. */
	/* The next two fields are used to print a rate-limited warning. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
	struct task	uz_maxaction;	/* Task to run when at limit */

	/* 16 bytes of pad. */

	/* Offset 256, atomic stats. */
	volatile u_long	uz_allocs UMA_ALIGN; /* Total number of allocations */
	volatile u_long	uz_fails;	/* Total number of alloc failures */
	volatile u_long	uz_frees;	/* Total number of frees */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[];	/* Per cpu caches */

	/* uz_domain follows here. */
};

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_MULTI		0x04000000	/* Multiple kegs in the zone. */
#define UMA_ZFLAG_DRAINING	0x08000000	/* Running zone_drain. */
#define UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define UMA_ZFLAG_FULL		0x40000000	/* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define UMA_ZFLAG_INHERIT						\
	(UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)

static inline uma_keg_t
zone_first_keg(uma_zone_t zone)
{
	uma_klink_t klink;

	klink = LIST_FIRST(&zone->uz_kegs);
	return (klink != NULL) ? klink->kl_keg : NULL;
}

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
void *uma_large_malloc(vm_size_t size, int wait);
void *uma_large_malloc_domain(vm_size_t size, int domain, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define KEG_LOCK_INIT(k, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)
#define KEG_UNLOCK(k)	mtx_unlock(&(k)->uk_lock)

#define ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define ZONE_LOCK(z)	mtx_lock((z)->uz_lockptr)
#define ZONE_TRYLOCK(z)	mtx_trylock((z)->uz_lockptr)
#define ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lockptr)
#define ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)

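/*
 * Usage sketch with a hypothetical helper: the keg lock covers the keg's
 * counters and per-domain slab lists, so reading them consistently looks
 * like this.
 */
#if 0
static uint32_t
keg_count_free_sketch(uma_keg_t keg)
{
	uint32_t free_items;

	KEG_LOCK(keg);
	free_items = keg->uk_free;	/* Maintained under uk_lock. */
	KEG_UNLOCK(keg);
	return (free_items);
}
#endif
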
/*
 * Find a slab within a hash table. This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((uint8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	/* The slab pointer is stashed in the backing vm_page. */
	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return ((uma_slab_t)p->plinks.s.pv);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.s.pv = slab;
}

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions. This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
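
/*
 * Sketch of an architecture-specific uma_small_alloc() in the spirit of
 * the amd64 implementation: grab a single page and hand back its
 * direct-map address, avoiding any KVA allocation. Simplified; the real
 * version handles wait flags and low-memory retries more carefully.
 */
#if 0
void *
uma_small_alloc_sketch(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait)
{
	vm_page_t m;

	*pflag = UMA_SLAB_PRIV;
	m = vm_page_alloc_domain(NULL, 0, domain,
	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
	if (m == NULL)
		return (NULL);
	/* No mapping needed; the direct map already covers this page. */
	return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
#endif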

/* Set a global soft limit on UMA managed memory. */
void uma_set_limit(unsigned long limit);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */