/*	$NetBSD: subr_vmem.c,v 1.108 2022/05/31 08:43:16 andvar Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * - Magazines and Vmem: Extending the Slab Allocator
 *   to Many CPUs and Arbitrary Resources
 *   http://www.usenix.org/event/usenix01/bonwick.html
 *
 * locking & the boundary tag pool:
 * - A pool(9) is used for vmem boundary tags.
 * - During a pool get call the global vmem_btag_refill_lock is taken
 *   to serialize access to the allocation reserve, but no vmem arena
 *   locks are held.
 * - During pool_put calls no vmem mutexes are locked.
 * - pool_drain doesn't hold the pool's mutex while releasing memory to
 *   its backing allocator, so it cannot interfere with any vmem mutexes.
 * - The boundary tag pool is forced to put page headers into pool pages
 *   (PR_PHINPAGE) and not off page, to avoid pool recursion.
 *   (Due to sizeof(bt_t) this should be the case anyway.)
 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.108 2022/05/31 08:43:16 andvar Exp $");
50
51 #if defined(_KERNEL) && defined(_KERNEL_OPT)
52 #include "opt_ddb.h"
53 #endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */
54
55 #include <sys/param.h>
56 #include <sys/hash.h>
57 #include <sys/queue.h>
58 #include <sys/bitops.h>
59
60 #if defined(_KERNEL)
61 #include <sys/systm.h>
62 #include <sys/kernel.h> /* hz */
63 #include <sys/callout.h>
64 #include <sys/kmem.h>
65 #include <sys/pool.h>
66 #include <sys/vmem.h>
67 #include <sys/vmem_impl.h>
68 #include <sys/workqueue.h>
69 #include <sys/atomic.h>
70 #include <uvm/uvm.h>
71 #include <uvm/uvm_extern.h>
72 #include <uvm/uvm_km.h>
73 #include <uvm/uvm_page.h>
74 #include <uvm/uvm_pdaemon.h>
75 #else /* defined(_KERNEL) */
76 #include <stdio.h>
77 #include <errno.h>
78 #include <assert.h>
79 #include <stdlib.h>
80 #include <string.h>
81 #include "../sys/vmem.h"
82 #include "../sys/vmem_impl.h"
83 #endif /* defined(_KERNEL) */
84
85
86 #if defined(_KERNEL)
87 #include <sys/evcnt.h>
88 #define VMEM_EVCNT_DEFINE(name) \
89 struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
90 "vmem", #name); \
91 EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
92 #define VMEM_EVCNT_INCR(ev) vmem_evcnt_##ev.ev_count++
93 #define VMEM_EVCNT_DECR(ev) vmem_evcnt_##ev.ev_count--
94
95 VMEM_EVCNT_DEFINE(static_bt_count)
96 VMEM_EVCNT_DEFINE(static_bt_inuse)
97
98 #define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
99 #define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
100 #define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
101 #define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
102
103 #else /* defined(_KERNEL) */
104
105 #define VMEM_EVCNT_INCR(ev) /* nothing */
106 #define VMEM_EVCNT_DECR(ev) /* nothing */
107
108 #define VMEM_CONDVAR_INIT(vm, wchan) /* nothing */
109 #define VMEM_CONDVAR_DESTROY(vm) /* nothing */
110 #define VMEM_CONDVAR_WAIT(vm) /* nothing */
111 #define VMEM_CONDVAR_BROADCAST(vm) /* nothing */
112
113 #define UNITTEST
114 #define KASSERT(a) assert(a)
115 #define mutex_init(a, b, c) /* nothing */
116 #define mutex_destroy(a) /* nothing */
117 #define mutex_enter(a) /* nothing */
118 #define mutex_tryenter(a) true
119 #define mutex_exit(a) /* nothing */
#define	mutex_owned(a)		true
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	panic(...)		do { printf(__VA_ARGS__); abort(); } while (0)
#endif /* defined(_KERNEL) */

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	1

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

#if defined(_KERNEL)
static bool vmem_bootstrapped = false;
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

/* ---- misc */

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)ilog2(size))
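
/*
 * Worked examples for the macros above (illustrative values only; both
 * VMEM_ALIGNUP and VMEM_CROSS_P assume a power-of-two align/boundary):
 *
 *	VMEM_ALIGNUP(0x123, 0x100)		== 0x200
 *	VMEM_CROSS_P(0x1f00, 0x20ff, 0x1000)	!= 0	(crosses 0x2000)
 *	ORDER2SIZE(3)				== 8
 *	SIZE2ORDER(8)				== 3
 */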

#if !defined(_KERNEL)
#define	xmalloc(sz, flags)	malloc(sz)
#define	xfree(p, sz)		free(p)
#define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define	bt_free(vm, bt)		free(bt)
#else /* defined(_KERNEL) */

#define	xmalloc(sz, flags) \
    kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP)
#define	xfree(p, sz)		kmem_free(p, sz)

/*
 * BT_RESERVE calculation:
 * we allocate memory for boundary tags with vmem; therefore we have
 * to keep a reserve of bts used to allocate memory for bts.
 * This reserve is 4 for each arena involved in allocating vmem's memory.
 * BT_MAXFREE: don't cache excessive counts of bts in arenas.
 */
#define STATIC_BT_COUNT 200
#define BT_MINRESERVE 4
#define BT_MAXFREE 64
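
/*
 * A sketch of the recursion that this reserve breaks (assuming the
 * meta arenas set up in vmem_subsystem_init() below):
 *
 *	vmem_alloc() -> bt_alloc() -> pool_get(&vmem_btag_pool)
 *	    -> pool_page_alloc_vmem_meta()
 *	    -> vmem_alloc(kmem_meta_arena, ..., VM_POPULATING, ...)
 *	    -> bt_alloc() -> ...
 *
 * Only the VM_POPULATING leg may consume the final BT_MINRESERVE tags;
 * ordinary allocations must leave that reserve alone (see bt_alloc()).
 */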

static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;

static struct vmem kmem_va_meta_arena_store;
vmem_t *kmem_va_meta_arena;
static struct vmem kmem_meta_arena_store;
vmem_t *kmem_meta_arena = NULL;

static kmutex_t vmem_btag_refill_lock;
static kmutex_t vmem_btag_lock;
static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
static size_t vmem_btag_freelist_count = 0;
static struct pool vmem_btag_pool;

static void vmem_xfree_bt(vmem_t *, bt_t *);

static void
vmem_kick_pdaemon(void)
{
#if defined(_KERNEL)
	uvm_kick_pdaemon();
#endif
}

/* ---- boundary tag */

static int bt_refill(vmem_t *vm);
static int bt_refill_locked(vmem_t *vm);

static void *
pool_page_alloc_vmem_meta(struct pool *pp, int flags)
{
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
	vmem_addr_t va;
	int ret;

	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
	    (vflags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va);

	return ret ? NULL : (void *)va;
}

static void
pool_page_free_vmem_meta(struct pool *pp, void *v)
{

	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
}

/* allocator for vmem-pool metadata */
struct pool_allocator pool_allocator_vmem_meta = {
	.pa_alloc = pool_page_alloc_vmem_meta,
	.pa_free = pool_page_free_vmem_meta,
	.pa_pagesz = 0
};

static int
bt_refill_locked(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	if (vm->vm_nfreetags > BT_MINRESERVE) {
		return 0;
	}

	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
		VMEM_EVCNT_INCR(static_bt_inuse);
	}
	mutex_exit(&vmem_btag_lock);

	while (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		mutex_enter(&vmem_btag_refill_lock);
		bt = pool_get(&vmem_btag_pool, PR_NOWAIT);
		mutex_exit(&vmem_btag_refill_lock);
		VMEM_LOCK(vm);
		if (bt == NULL)
			break;
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags <= BT_MINRESERVE) {
		return ENOMEM;
	}

	if (kmem_meta_arena != NULL) {
		VMEM_UNLOCK(vm);
		(void)bt_refill(kmem_arena);
		(void)bt_refill(kmem_va_meta_arena);
		(void)bt_refill(kmem_meta_arena);
		VMEM_LOCK(vm);
	}

	return 0;
}

static int
bt_refill(vmem_t *vm)
{
	int rv;

	VMEM_LOCK(vm);
	rv = bt_refill_locked(vm);
	VMEM_UNLOCK(vm);
	return rv;
}

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	while (vm->vm_nfreetags <= BT_MINRESERVE && (flags & VM_POPULATING) == 0) {
		if (bt_refill_locked(vm)) {
			if ((flags & VM_NOSLEEP) != 0) {
				return NULL;
			}

			/*
			 * It would be nice to wait for something specific here
			 * but there are multiple ways that a retry could
			 * succeed and we can't wait for multiple things
			 * simultaneously.  So we'll just sleep for an arbitrary
			 * short period of time and retry regardless.
			 * This should be a very rare case.
			 */

			vmem_kick_pdaemon();
			kpause("btalloc", false, 1, &vm->vm_lock);
		}
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);

	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	VMEM_ASSERT_LOCKED(vm);

	LIST_INIT(&tofree);

	while (vm->vm_nfreetags > freelimit) {
		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		if (bt >= static_bts
		    && bt < &static_bts[STATIC_BT_COUNT]) {
			mutex_enter(&vmem_btag_lock);
			LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
			vmem_btag_freelist_count++;
			mutex_exit(&vmem_btag_lock);
			VMEM_EVCNT_DECR(static_bt_inuse);
		} else {
			LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		}
	}

	VMEM_UNLOCK(vm);
	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		pool_put(&vmem_btag_pool, t);
	}
}
#endif /* defined(_KERNEL) */

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */
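
/*
 * For example, assuming a 4 KiB quantum (vm_quantum_shift == 12):
 * a 12 KiB free segment has qsize == 3 and is filed by
 * bt_freehead_tofree() on freelist[SIZE2ORDER(3)] == freelist[1].
 * A 12 KiB VM_INSTANTFIT request starts searching at freelist[2],
 * whose blocks are all at least 16 KiB, while VM_BESTFIT starts at
 * freelist[1] and must check bt_size of each candidate.
 */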

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * for VM_INSTANTFIT, return the list in which any blocks are large enough
 * for the requested size.  otherwise, return the list which can have blocks
 * large enough for the requested size.
 */

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash & vm->vm_hashmask];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	if (++vm->vm_nbusytag > vm->vm_maxbusytag) {
		vm->vm_maxbusytag = vm->vm_nbusytag;
	}
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}
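
/*
 * E.g. qcache_max == 4096 yields a 16384-byte pool page here: the
 * smallest power of 2 greater than 3 * qcache_max, so that even the
 * largest cached size fits into a pool page several times over.
 */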

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;
		pool_cache_t pc;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);

		pc = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);

		KASSERT(pc);

		qc->qc_cache = pc;
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}
#endif

#if defined(_KERNEL)
static void
vmem_bootstrap(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_refill_lock, MUTEX_DEFAULT, IPL_VM);

	while (static_bt_count-- > 0) {
		bt_t *bt = &static_bts[static_bt_count];
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		VMEM_EVCNT_INCR(static_bt_count);
		vmem_btag_freelist_count++;
	}
	vmem_bootstrapped = true;
}

void
vmem_subsystem_init(vmem_t *vm)
{

	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
	    IPL_VM);

	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
	    0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	pool_init(&vmem_btag_pool, sizeof(bt_t), coherency_unit, 0,
	    PR_PHINPAGE, "vmembt", &pool_allocator_vmem_meta, IPL_VM);
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	VMEM_ASSERT_LOCKED(vm);
	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return ENOMEM;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;

	return 0;
}

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	VMEM_LOCK(vm);

	for (int i = 0; i < vm->vm_hashsize; i++) {
		bt_t *bt;

		while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
			KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
			LIST_REMOVE(bt, bt_hashlist);
			bt_free(vm, bt);
		}
	}

	/* bt_freetrim() drops the lock. */
	bt_freetrim(vm, 0);
	if (vm->vm_hashlist != &vm->vm_hash0) {
		xfree(vm->vm_hashlist,
		    sizeof(struct vmem_hashlist) * vm->vm_hashsize);
	}

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	xfree(vm, sizeof(*vm));
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	VMEM_ASSERT_LOCKED(vm);

	if (vm->vm_importfn == NULL) {
		return EINVAL;
	}

	if (vm->vm_flags & VM_LARGEIMPORT) {
		size *= 16;
	}

	VMEM_UNLOCK(vm);
	if (vm->vm_flags & VM_XIMPORT) {
		rc = __FPTRCAST(vmem_ximport_t *, vm->vm_importfn)(vm->vm_arg,
		    size, &size, flags, &addr);
	} else {
		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	}
	VMEM_LOCK(vm);

	if (rc) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		VMEM_LOCK(vm);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	/* Round hash size up to the next power of 2. */
	newhashsize = 1 << (ilog2(newhashsize) + 1);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	/* Decay back to a small hash slowly. */
	if (vm->vm_maxbusytag >= 2) {
		vm->vm_maxbusytag = vm->vm_maxbusytag / 2 - 1;
		if (vm->vm_nbusytag > vm->vm_maxbusytag) {
			vm->vm_maxbusytag = vm->vm_nbusytag;
		}
	} else {
		vm->vm_maxbusytag = vm->vm_nbusytag;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	vm->vm_hashmask = newhashsize - 1;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != &vm->vm_hash0) {
		xfree(oldhashlist,
		    sizeof(struct vmem_hashlist) * oldhashsize);
	}

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it's a caller's responsibility to ensure the region is big enough
 * before calling us.
 */

static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return ENOMEM;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return ENOMEM;
}
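
/*
 * A worked example with made-up numbers: for a free tag covering
 * [0x1000, 0x4fff], a request with size 0x800, align 0x1000 and
 * phase 0x100 yields
 *
 *	start = VMEM_ALIGNUP(0x1000 - 0x100, 0x1000) + 0x100 = 0x1100
 *
 * and the resulting range [0x1100, 0x18ff] lies within the tag.
 */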

/* ---- vmem API */

/*
 * vmem_init: creates a vmem arena.
 */

vmem_t *
vmem_init(vmem_t *vm, const char *name,
    vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
    vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	int i;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

#if defined(_KERNEL)
	/* XXX: SMP, we get called early... */
	if (!vmem_bootstrapped) {
		vmem_bootstrap();
	}
#endif /* defined(_KERNEL) */

	if (vm == NULL) {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_maxbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = 1;
	vm->vm_hashmask = vm->vm_hashsize - 1;
	vm->vm_hashlist = &vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm);
	}

	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    importfn, releasefn, source, qcache_max, flags, ipl);
}
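
/*
 * A minimal usage sketch (hypothetical arena and numbers, assuming
 * kernel context):
 *
 *	vmem_t *arena;
 *	vmem_addr_t va;
 *
 *	arena = vmem_create("example", 0x1000, 0x10000, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	if (vmem_alloc(arena, 0x100, VM_SLEEP | VM_INSTANTFIT, &va) == 0)
 *		vmem_free(arena, va, 0x100);
 *	vmem_destroy(arena);
 */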

/*
 * vmem_xcreate: create an arena that takes an alternative import function.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    __FPTRCAST(vmem_import_t *, importfn), releasefn, source,
	    qcache_max, flags | VM_XIMPORT, ipl);
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
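
/*
 * E.g. with a 4 KiB quantum (vm_quantum_mask == 0xfff), a request of
 * 0x1234 bytes rounds up to 0x2000.
 */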

/*
 * vmem_alloc: allocate resource from the arena.
 */

int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __diagused = flags & VM_FITMASK;
	int error;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		error = (p == NULL) ? ENOMEM : 0;
		goto out;
	}
#endif /* defined(QCACHE) */

	error = vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
out:
	KASSERTMSG(error || addrp == NULL ||
	    (*addrp & vm->vm_quantum_mask) == 0,
	    "vmem %s mask=0x%jx addr=0x%jx",
	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)*addrp);
	KASSERT(error == 0 || (flags & VM_SLEEP) == 0);
	return error;
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
    vmem_addr_t *addrp)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	int rc;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(minaddr <= maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}

	/*
	 * allocate boundary tags before acquiring the vmem lock.
	 */
	VMEM_LOCK(vm);
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}

	/*
	 * choose a free block from which we allocate.
	 */
retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		/*
		 * just choose the first block which satisfies our restrictions.
		 *
		 * note that we don't need to check the size of the blocks
		 * because any blocks found on these lists should be larger
		 * than the given size.
		 */
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				rc = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr, &start);
				if (rc == 0) {
					goto gotit;
				}
				/*
				 * don't bother to follow the bt_freelist link
				 * here.  the list can be very long and we are
				 * told to run fast.  blocks from the later free
				 * lists are larger and have better chances to
				 * satisfy our restrictions.
				 */
			}
		}
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we will start searching
		 * from a lower-order list than VM_INSTANTFIT would.
		 * however, don't bother to find the smallest block in a free
		 * list because the list can be very long.  we can revisit it
		 * if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller than
		 * the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */

	if ((flags & VM_SLEEP) != 0) {
		vmem_kick_pdaemon();
		VMEM_CONDVAR_WAIT(vm);
		goto retry;
	}
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	VMEM_UNLOCK(vm);
	return ENOMEM;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;
	if (addrp != NULL)
		*addrp = btnew->bt_start;
	VMEM_UNLOCK(vm);
	KASSERTMSG(addrp == NULL ||
	    (*addrp & vm->vm_quantum_mask) == 0,
	    "vmem %s mask=0x%jx addr=0x%jx",
	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)*addrp);
	return 0;
}
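
/*
 * For instance, a constrained allocation of 0x100 bytes that must be
 * 4 KiB aligned and must not cross a 64 KiB boundary could look like
 * this (illustrative call, assuming the arena's quantum divides the
 * alignment):
 *
 *	vmem_addr_t va;
 *	int error = vmem_xalloc(vm, 0x100, 0x1000, 0, 0x10000,
 *	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, VM_SLEEP | VM_BESTFIT, &va);
 */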

/*
 * vmem_free: free the resource to the arena.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);
	KASSERTMSG((addr & vm->vm_quantum_mask) == 0,
	    "vmem %s mask=0x%jx addr=0x%jx",
	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)addr);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;

	KASSERT(size > 0);
	KASSERTMSG((addr & vm->vm_quantum_mask) == 0,
	    "vmem %s mask=0x%jx addr=0x%jx",
	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)addr);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERTMSG(bt != NULL, "vmem %s addr 0x%jx size 0x%jx",
	    vm->vm_name, (uintmax_t)addr, (uintmax_t)size);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);

	/* vmem_xfree_bt() drops the lock. */
	vmem_xfree_bt(vm, bt);
}

void
vmem_xfreeall(vmem_t *vm)
{
	bt_t *bt;

	/* This can't be used if the arena has a quantum cache. */
	KASSERT(vm->vm_qcache_max == 0);

	for (;;) {
		VMEM_LOCK(vm);
		TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
			if (bt->bt_type == BT_TYPE_BUSY)
				break;
		}
		if (bt != NULL) {
			/* vmem_xfree_bt() drops the lock. */
			vmem_xfree_bt(vm, bt);
		} else {
			VMEM_UNLOCK(vm);
			return;
		}
	}
}

static void
vmem_xfree_bt(vmem_t *vm, bt_t *bt)
{
	bt_t *t;

	VMEM_ASSERT_LOCKED(vm);

	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt_free(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_free(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_free(vm, bt);
		bt_remseg(vm, t);
		bt_free(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		/* bt_freetrim() drops the lock. */
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		/* bt_freetrim() drops the lock. */
		bt_freetrim(vm, BT_MAXFREE);
	}
}
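
/*
 * Coalescing sketch (made-up addresses): freeing the busy range
 * [0x2000, 0x2fff] between free neighbours [0x1000, 0x1fff] and
 * [0x3000, 0x3fff] leaves a single free tag covering [0x1000, 0x3fff];
 * if that tag now covers a whole BT_TYPE_SPAN imported earlier, the
 * span is handed back to the source arena through vm_releasefn.
 */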

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{
	int rv;

	VMEM_LOCK(vm);
	rv = vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
	VMEM_UNLOCK(vm);

	return rv;
}

/*
 * vmem_size: information about an arena's size
 *
 * => return the free/allocated size of the arena
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		desired = atomic_load_relaxed(&vm->vm_maxbusytag);
		current = atomic_load_relaxed(&vm->vm_hashsize);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
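
/*
 * The factor-of-two hysteresis above avoids rehash thrashing: e.g.
 * with a current hash size of 16, a rehash is triggered only once the
 * desired size (tracked via vm_maxbusytag) exceeds 32 or drops below 8.
 */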

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */

/* ---- debug */

#if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)

static void bt_dump(const bt_t *, void (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{
	static const char * const table[] = {
		[BT_TYPE_BUSY] = "busy",
		[BT_TYPE_FREE] = "free",
		[BT_TYPE_SPAN] = "span",
		[BT_TYPE_SPAN_STATIC] = "static span",
	};

	if (type >= __arraycount(table)) {
		return "BOGUS";
	}
	return table[type];
}

static void
bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
{

	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */

#if defined(_KERNEL)
#define vmem_printf printf
#else
#include <stdio.h>
#include <stdarg.h>

static void
vmem_printf(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
#endif

#if defined(VMEM_SANITY)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	KASSERT(vm != NULL);

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(VMEM_SANITY) */

#if defined(UNITTEST)
int
main(void)
{
	int rc;
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
#ifdef _KERNEL
	    IPL_NONE
#else
	    0
#endif
	    );
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm, vmem_printf);

	rc = vmem_add(vm, 0, 50, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 100, 200, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 2000, 1, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 500, 1000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	vmem_xfree(vm, p, 50);
	rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc == 0);
	vmem_dump(vm, vmem_printf);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				do {
					minaddr = rand() % 50000;
					maxaddr = rand() % 70000;
				} while (minaddr > maxaddr);
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				rc = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP, &p);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm, vmem_printf);
			if (rc != 0) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm, vmem_printf);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* defined(UNITTEST) */