FreeBSD/Linux Kernel Cross Reference
sys/vm/uma_int.h
1 /*
2 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * $FreeBSD: releng/5.0/sys/vm/uma_int.h 106277 2002-11-01 01:01:27Z jeff $
27 *
28 */
29
30 /*
31 * This file includes definitions, structures, prototypes, and inlines that
32 * should not be used outside of the actual implementation of UMA.
33 */
34
35 /*
36 * Here's a quick description of the relationship between the objects:
37 *
38  * Zones contain lists of slabs, which are stored in either the full bin,
39  * empty bin, or partially allocated bin, to reduce fragmentation. They also
40  * contain the user-supplied size, which is adjusted for alignment; the
41  * adjusted result is stored in rsize. The zone also stores information for
42  * managing a hash of page addresses that maps pages to uma_slab_t structures
43  * for pages that don't have embedded uma_slab_t's.
44 *
45 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
46 * be allocated off the page from a special slab zone. The free list within a
47  * slab is managed with a linked list of indexes, which are 8-bit values. If
48  * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16-bit
49  * values. Currently on alpha you can get 250 or so 32-byte items and on x86
50  * you can get 250 or so 16-byte items. For item sizes that would yield more
51  * than 10% memory waste we potentially allocate a separate uma_slab_t if
52  * doing so improves the number of items that fit per slab.
53 *
54  * One potential space optimization is storing the 8-bit linkage indexes in the
55  * space wasted between items due to alignment. This may yield a much better
56  * memory footprint for certain object sizes. Another alternative is to
57  * increase UMA_SLAB_SIZE, or to allow for dynamic slab sizes. I prefer
58  * dynamic slab sizes because we could stick with 8-bit indexes and only use
59  * large slab sizes for zones with a lot of waste per slab. This may create
60  * inefficiencies in the vm subsystem due to fragmentation in the address space.
61 *
62  * The only really gross cases, with regard to memory waste, are those
63  * items that are just over half the page size. You can get nearly 50% waste,
64  * so you fall back to the memory footprint of the power-of-two allocator. I
65  * have looked at memory allocation sizes on many of the machines available to
66  * me, and there does not seem to be an abundance of allocations in this range,
67  * so at this time it may not make sense to optimize for it. This can, of
68 * course, be solved with dynamic slab sizes.
69 *
70 */
71
72 /*
73  * This is the representation of a normal (non-OFFPAGE) slab.
74 *
75 * i == item
76 * s == slab pointer
77 *
78 * <---------------- Page (UMA_SLAB_SIZE) ------------------>
79 * ___________________________________________________________
80 * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ___________ |
81 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
82 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
83 * |___________________________________________________________|
84 *
85 *
86 * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE.
87 *
88 * ___________________________________________________________
89 * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ |
90 * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
91 * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
92 * |___________________________________________________________|
93 * ___________ ^
94 * |slab header| |
95 * |___________|---*
96 *
97 */
98
99 #ifndef VM_UMA_INT_H
100 #define VM_UMA_INT_H
101
102 #define UMA_SLAB_SIZE PAGE_SIZE /* How big are our slabs? */
103 #define UMA_SLAB_MASK (PAGE_SIZE - 1) /* Mask to get back to the page */
104 #define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */
105
106 #define UMA_BOOT_PAGES 30 /* Number of pages allocated for startup */
107 #define UMA_WORKING_TIME	20	/* Seconds' worth of items to keep */
108
109
110 /* Max waste before going to off page slab management */
111 #define UMA_MAX_WASTE (UMA_SLAB_SIZE / 10)
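
/*
 * Illustrative sketch (not part of the header, names are hypothetical): how
 * the items-per-slab/waste trade-off described at the top of this file relates
 * to UMA_MAX_WASTE.  The real computation is done when the zone is created in
 * uma_core.c; this only shows the shape of the decision.
 */
#if 0
static int
slab_waste_example(int rsize, int hdrsize)
{
	int ipers;	/* items that fit with the slab header kept on-page */
	int waste;	/* bytes left over in that layout */

	ipers = (UMA_SLAB_SIZE - hdrsize) / rsize;
	waste = UMA_SLAB_SIZE - hdrsize - (ipers * rsize);

	/*
	 * If keeping the header on-page wastes more than UMA_MAX_WASTE and
	 * an off-page header would let at least one more item fit, it is
	 * worth allocating the uma_slab_t separately (OFFPAGE).
	 */
	if (waste > UMA_MAX_WASTE && (UMA_SLAB_SIZE / rsize) > ipers)
		return (1);	/* prefer an off-page uma_slab_t */
	return (0);		/* keep the header embedded in the slab */
}
#endif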
112
113 /*
114  * This is the initial size of the hash table for uma_slabs that are managed
115  * off page. I doubt there will be many cases where it is exceeded. The hash
116  * does expand by powers of two; currently it does not shrink.
117  */
118 #define UMA_HASH_SIZE_INIT 32
119
120
121 /*
122 * I should investigate other hashing algorithms. This should yield a low
123 * number of collisions if the pages are relatively contiguous.
124 *
125 * This is the same algorithm that most processor caches use.
126 *
127 * I'm shifting and masking instead of % because it should be faster.
128 */
129
130 #define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) & \
131 (h)->uh_hashmask)
132
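/*
 * Worked example (illustrative): with PAGE_SHIFT == 12 and a 32-bucket table
 * (uh_hashmask == 31), a slab whose base page is 0xc1234000 hashes to
 * (0xc1234000 >> 12) & 31 == 0xc1234 & 0x1f == 20.  Because the hash size is
 * always a power of two, the shift-and-mask above is equivalent to
 * (va >> PAGE_SHIFT) % uh_hashsize, without the division.
 */
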
133 #define UMA_HASH_INSERT(h, s, mem) \
134 SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h), \
135 (mem))], (s), us_hlink);
136 #define UMA_HASH_REMOVE(h, s, mem) \
137 SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h), \
138 (mem))], (s), uma_slab, us_hlink);
139
140 /* Page management structure */
141
142 /* Sorry for the union, but space efficiency is important */
143 struct uma_slab {
144 uma_zone_t us_zone; /* Zone we live in */
145 union {
146 LIST_ENTRY(uma_slab) us_link; /* slabs in zone */
147 unsigned long us_size; /* Size of allocation */
148 } us_type;
149 SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */
150 u_int8_t *us_data; /* First item */
151 u_int8_t us_flags; /* Page flags see uma.h */
152 u_int8_t us_freecount; /* How many are free? */
153 u_int8_t us_firstfree; /* First free item index */
154 u_int8_t us_freelist[1]; /* Free List (actually larger) */
155 };
156
157 #define us_link us_type.us_link
158 #define us_size us_type.us_size
159
160 typedef struct uma_slab * uma_slab_t;
161
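/*
 * Sketch (illustrative, not the actual allocator): how the embedded free list
 * above is meant to be consumed.  us_firstfree holds the index of the first
 * free item and us_freelist[i] holds the index of the free item that follows
 * item i, so popping an item is two loads and a store.  The function name and
 * the rsize parameter are hypothetical; the real code lives in uma_core.c.
 */
#if 0
static void *
slab_pop_item_example(uma_slab_t slab, int rsize)
{
	u_int8_t freei;

	freei = slab->us_firstfree;	/* index of the item handed out */
	slab->us_firstfree = slab->us_freelist[freei];
	slab->us_freecount--;

	return (slab->us_data + (freei * rsize));
}
#endif
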
162 /* Hash table for freed address -> slab translation */
163
164 SLIST_HEAD(slabhead, uma_slab);
165
166 struct uma_hash {
167 struct slabhead *uh_slab_hash; /* Hash table for slabs */
168 int uh_hashsize; /* Current size of the hash table */
169 int uh_hashmask; /* Mask used during hashing */
170 };
171
172 /*
173 * Structures for per cpu queues.
174 */
175
176 /*
177 * This size was chosen so that the struct bucket size is roughly
178  * 128 * sizeof(void *). This is exactly true for x86; on alpha it
179  * would be 32 bits smaller if it didn't have alignment adjustments.
180 */
181
182 #define UMA_BUCKET_SIZE 125
183
184 struct uma_bucket {
185 LIST_ENTRY(uma_bucket) ub_link; /* Link into the zone */
186 	int16_t	ub_ptr;			/* Index of the current item */
187 void *ub_bucket[UMA_BUCKET_SIZE]; /* actual allocation storage */
188 };
189
190 typedef struct uma_bucket * uma_bucket_t;
191
192 struct uma_cache {
193 	struct mtx	uc_lock;	/* Lock for this cpu's buckets */
194 uma_bucket_t uc_freebucket; /* Bucket we're freeing to */
195 uma_bucket_t uc_allocbucket; /* Bucket to allocate from */
196 u_int64_t uc_allocs; /* Count of allocations */
197 };
198
199 typedef struct uma_cache * uma_cache_t;
200
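/*
 * Sketch (illustrative only): the per-cpu fast path these structures are built
 * for.  ub_ptr is the index of the topmost item in the bucket, or -1 when the
 * bucket is empty, so an allocation from the cache is an array read plus a
 * decrement.  Locking (CPU_LOCK below) and all refill/drain logic are omitted;
 * the real fast path is in uma_core.c.
 */
#if 0
static void *
cache_alloc_example(uma_cache_t cache)
{
	uma_bucket_t bucket;
	void *item;

	bucket = cache->uc_allocbucket;
	if (bucket == NULL || bucket->ub_ptr < 0)
		return (NULL);		/* fall back to the zone's slow path */

	item = bucket->ub_bucket[bucket->ub_ptr];
	bucket->ub_bucket[bucket->ub_ptr] = NULL;
	bucket->ub_ptr--;
	cache->uc_allocs++;

	return (item);
}
#endif
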
201 #define LOCKNAME_LEN 16 /* Length of the name for cpu locks */
202
203 /*
204 * Zone management structure
205 *
206 * TODO: Optimize for cache line size
207 *
208 */
209 struct uma_zone {
210 char uz_lname[LOCKNAME_LEN]; /* Text name for the cpu lock */
211 char *uz_name; /* Text name of the zone */
212 LIST_ENTRY(uma_zone) uz_link; /* List of all zones */
213 u_int32_t uz_align; /* Alignment mask */
214 u_int32_t uz_pages; /* Total page count */
215
216 /* Used during alloc / free */
217 struct mtx uz_lock; /* Lock for the zone */
218 u_int32_t uz_free; /* Count of items free in slabs */
219 u_int16_t uz_ipers; /* Items per slab */
220 u_int16_t uz_flags; /* Internal flags */
221
222 LIST_HEAD(,uma_slab) uz_part_slab; /* partially allocated slabs */
223 LIST_HEAD(,uma_slab) uz_free_slab; /* empty slab list */
224 LIST_HEAD(,uma_slab) uz_full_slab; /* full slabs */
225 LIST_HEAD(,uma_bucket) uz_full_bucket; /* full buckets */
226 LIST_HEAD(,uma_bucket) uz_free_bucket; /* Buckets for frees */
227 u_int32_t uz_size; /* Requested size of each item */
228 u_int32_t uz_rsize; /* Real size of each item */
229
230 struct uma_hash uz_hash;
231 u_int16_t uz_pgoff; /* Offset to uma_slab struct */
232 u_int16_t uz_ppera; /* pages per allocation from backend */
233 u_int16_t uz_cacheoff; /* Next cache offset */
234 u_int16_t uz_cachemax; /* Max cache offset */
235
236 uma_ctor uz_ctor; /* Constructor for each allocation */
237 uma_dtor uz_dtor; /* Destructor */
238 u_int64_t uz_allocs; /* Total number of allocations */
239
240 uma_init uz_init; /* Initializer for each item */
241 uma_fini uz_fini; /* Discards memory */
242 uma_alloc uz_allocf; /* Allocation function */
243 uma_free uz_freef; /* Free routine */
244 struct vm_object *uz_obj; /* Zone specific object */
245 vm_offset_t uz_kva; /* Base kva for zones with objs */
246 u_int32_t uz_maxpages; /* Maximum number of pages to alloc */
247 u_int32_t uz_cachefree; /* Last count of items free in caches */
248 u_int64_t uz_oallocs; /* old allocs count */
249 u_int64_t uz_wssize; /* Working set size */
250 int uz_recurse; /* Allocation recursion count */
251 uint16_t uz_fills; /* Outstanding bucket fills */
252 uint16_t uz_count; /* Highest value ub_ptr can have */
253 /*
254 * This HAS to be the last item because we adjust the zone size
255 * based on NCPU and then allocate the space for the zones.
256 */
257 struct uma_cache uz_cpu[1]; /* Per cpu caches */
258 };
259
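/*
 * Illustrative arithmetic for the comment above uz_cpu: because the per-cpu
 * caches are the trailing member, the space allocated for a zone is roughly
 * the base structure plus one extra uma_cache per additional cpu, e.g.
 *
 *	zsize = sizeof(struct uma_zone) +
 *	    (ncpus - 1) * sizeof(struct uma_cache);
 *
 * where ncpus stands for however many per-cpu caches are needed.  The exact
 * expression is in uma_core.c; this only shows why uz_cpu[] must stay last.
 */
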
260 #define UMA_CACHE_INC 16 /* How much will we move data */
261
262 #define UMA_ZFLAG_OFFPAGE 0x0001 /* Struct slab/freelist off page */
263 #define UMA_ZFLAG_PRIVALLOC	0x0002		/* Zone has supplied its own alloc */
264 #define UMA_ZFLAG_INTERNAL 0x0004 /* Internal zone, no offpage no PCPU */
265 #define UMA_ZFLAG_MALLOC 0x0008 /* Zone created by malloc */
266 #define UMA_ZFLAG_NOFREE 0x0010 /* Don't free data from this zone */
267 #define UMA_ZFLAG_FULL 0x0020 /* This zone reached uz_maxpages */
268 #define UMA_ZFLAG_BUCKETCACHE 0x0040 /* Only allocate buckets from cache */
269 #define UMA_ZFLAG_HASH 0x0080 /* Look up slab via hash */
270
271 /* This lives in uflags */
272 #define UMA_ZONE_INTERNAL 0x1000 /* Internal zone for uflags */
273
274 /* Internal prototypes */
275 static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
276 void *uma_large_malloc(int size, int wait);
277 void uma_large_free(uma_slab_t slab);
278
279 /* Lock Macros */
280
281 #define ZONE_LOCK_INIT(z, lc) \
282 do { \
283 if ((lc)) \
284 mtx_init(&(z)->uz_lock, (z)->uz_name, \
285 (z)->uz_name, MTX_DEF | MTX_DUPOK); \
286 else \
287 mtx_init(&(z)->uz_lock, (z)->uz_name, \
288 "UMA zone", MTX_DEF | MTX_DUPOK); \
289 } while (0)
290
291 #define ZONE_LOCK_FINI(z) mtx_destroy(&(z)->uz_lock)
292 #define ZONE_LOCK(z) mtx_lock(&(z)->uz_lock)
293 #define ZONE_UNLOCK(z) mtx_unlock(&(z)->uz_lock)
294
295 #define CPU_LOCK_INIT(z, cpu, lc) \
296 do { \
297 if ((lc)) \
298 mtx_init(&(z)->uz_cpu[(cpu)].uc_lock, \
299 (z)->uz_lname, (z)->uz_lname, \
300 MTX_DEF | MTX_DUPOK); \
301 else \
302 mtx_init(&(z)->uz_cpu[(cpu)].uc_lock, \
303 (z)->uz_lname, "UMA cpu", \
304 MTX_DEF | MTX_DUPOK); \
305 } while (0)
306
307 #define CPU_LOCK_FINI(z, cpu) \
308 mtx_destroy(&(z)->uz_cpu[(cpu)].uc_lock)
309
310 #define CPU_LOCK(z, cpu) \
311 mtx_lock(&(z)->uz_cpu[(cpu)].uc_lock)
312
313 #define CPU_UNLOCK(z, cpu) \
314 mtx_unlock(&(z)->uz_cpu[(cpu)].uc_lock)
315
316 /*
317 * Find a slab within a hash table. This is used for OFFPAGE zones to lookup
318 * the slab structure.
319 *
320 * Arguments:
321 * hash The hash table to search.
322 * data The base page of the item.
323 *
324 * Returns:
325 * A pointer to a slab if successful, else NULL.
326 */
327 static __inline uma_slab_t
328 hash_sfind(struct uma_hash *hash, u_int8_t *data)
329 {
330 uma_slab_t slab;
331 int hval;
332
333 hval = UMA_HASH(hash, data);
334
335 SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
336 if ((u_int8_t *)slab->us_data == data)
337 return (slab);
338 }
339 return (NULL);
340 }
341
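/*
 * Typical usage (illustrative): callers first round an item pointer down to
 * its page with UMA_SLAB_MASK and then look that page up, e.g.
 *
 *	mem = (u_int8_t *)((unsigned long)item & ~UMA_SLAB_MASK);
 *	slab = hash_sfind(&zone->uz_hash, mem);
 *
 * which is how OFFPAGE/HASH zones recover the slab on free; see the call
 * sites in uma_core.c.
 */
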
342 static __inline uma_slab_t
343 vtoslab(vm_offset_t va)
344 {
345 vm_page_t p;
346 uma_slab_t slab;
347
348 p = PHYS_TO_VM_PAGE(pmap_kextract(va));
349 	slab = (uma_slab_t)p->object;
350
351 if (p->flags & PG_SLAB)
352 return (slab);
353 else
354 return (NULL);
355 }
356
357 static __inline void
358 vsetslab(vm_offset_t va, uma_slab_t slab)
359 {
360 vm_page_t p;
361
362 p = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)va));
363 p->object = (vm_object_t)slab;
364 p->flags |= PG_SLAB;
365 }
366
367 static __inline void
368 vsetobj(vm_offset_t va, vm_object_t obj)
369 {
370 vm_page_t p;
371
372 p = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)va));
373 p->object = obj;
374 p->flags &= ~PG_SLAB;
375 }
376
377 /*
378  * The following two functions may be defined by architecture-specific code
379  * if it can provide more efficient allocation functions. This is useful
380  * for using direct-mapped addresses.
381 */
382 void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
383 void uma_small_free(void *mem, int size, u_int8_t flags);
384
385 #endif /* VM_UMA_INT_H */