FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_unit.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2004 Poul-Henning Kamp
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD$
29 *
30 *
31 * Unit number allocation functions.
32 *
33 * These functions implement a mixed run-length/bitmap management of unit
34 * number spaces in a very memory-efficient manner.
35 *
36 * Allocation policy is always lowest free number first.
37 *
38 * A return value of -1 signals that no more unit numbers are available.
39 *
40 * There is no cost associated with the range of unit numbers, so unless
41 * the resource really is finite, specify INT_MAX to new_unrhdr() and
42 * forget about checking the return value.
43 *
44 * If a mutex is not provided when the unit number space is created, a
45 * default global mutex is used. The advantage of passing a mutex in is
46 * that the alloc_unrl() function can be called with the mutex already
47 * held (it will not be released by alloc_unrl()).
48 *
49 * The allocation function alloc_unr{l}() never sleeps (but it may block on
50 * the mutex of course).
51 *
52 * Freeing a unit number may require allocating memory, and can therefore
53 * sleep, so the free_unr() function does not come in a pre-locked variant.
54 *
55 * A userland test program is included.
56 *
57 * Memory usage is a very complex function of the exact allocation
58 * pattern, but always very compact:
59 * * For the very typical case where a single unbroken run of unit
60 * numbers is allocated, 44 bytes are used on i386.
61 * * For a unit number space of 1000 units and the random pattern
62 * in the usermode test program included, the worst case usage
63 * was 252 bytes on i386 for 500 allocated and 500 free units.
64 * * For a unit number space of 10000 units and the random pattern
65 * in the usermode test program included, the worst case usage
66 * was 798 bytes on i386 for 5000 allocated and 5000 free units.
67 * * The worst case is where every other unit number is allocated and
68 * the rest are free. In that case 44 + N/4 bytes are used where
69 * N is the number of the highest unit allocated.
70 */
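
/*
 * Illustrative sketch, not part of the original file: a typical consumer of
 * the interface described above.  The "foo" names are hypothetical; the
 * calls themselves (new_unrhdr(), alloc_unr(), free_unr() and
 * delete_unrhdr()) are the ones implemented below and declared in
 * <sys/systm.h>.
 */
#if 0
static struct unrhdr *foo_units;

static void
foo_init(void)
{
	/* Unit numbers 0..INT_MAX, protected by the default global mutex. */
	foo_units = new_unrhdr(0, INT_MAX, NULL);
}

static int
foo_attach(void)
{
	int unit;

	unit = alloc_unr(foo_units);		/* lowest free number first */
	if (unit == -1)
		return (ENOSPC);		/* number space exhausted */
	/* ... create and wire up the "foo<unit>" instance ... */
	return (0);
}

static void
foo_detach(u_int unit)
{
	free_unr(foo_units, unit);		/* may sleep, see above */
}

static void
foo_uninit(void)
{
	delete_unrhdr(foo_units);	/* every unit must have been freed */
}
#endif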
71
72 #include <sys/param.h>
73 #include <sys/types.h>
74 #include <sys/_unrhdr.h>
75
76 #ifdef _KERNEL
77
78 #include <sys/bitstring.h>
79 #include <sys/malloc.h>
80 #include <sys/kernel.h>
81 #include <sys/systm.h>
82 #include <sys/limits.h>
83 #include <sys/lock.h>
84 #include <sys/mutex.h>
85
86 /*
87 * In theory it would be smarter to allocate the individual blocks
88 * with the zone allocator, but at this time the expectation is that
89 * there will typically not even be enough allocations to fill a single
90 * page, so we stick with malloc for now.
91 */
92 static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");
93
94 #define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
95 #define Free(foo) free(foo, M_UNIT)
96
97 static struct mtx unitmtx;
98
99 MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);
100
101 #ifdef UNR64_LOCKED
102 uint64_t
103 alloc_unr64(struct unrhdr64 *unr64)
104 {
105 uint64_t item;
106
107 mtx_lock(&unitmtx);
108 item = unr64->counter++;
109 mtx_unlock(&unitmtx);
110 return (item);
111 }
112 #endif
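
/*
 * Illustrative sketch, not part of the original file: the unrhdr64 counter
 * handled above is a much simpler facility than the run-length/bitmap
 * allocator in the rest of this file -- it only counts upward and never
 * reuses a value.  new_unrhdr64() is assumed to be the inline initializer
 * declared in <sys/systm.h>; the "foo" names are hypothetical.
 */
#if 0
static struct unrhdr64 foo_gen;

static void
foo_gen_init(void)
{
	new_unrhdr64(&foo_gen, 1);	/* first value handed out will be 1 */
}

static uint64_t
foo_next_gen(void)
{
	/* Never sleeps, never fails and never recycles a number. */
	return (alloc_unr64(&foo_gen));
}
#endif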
113
114 #else /* ...USERLAND */
115
116 #include <bitstring.h>
117 #include <err.h>
118 #include <errno.h>
119 #include <getopt.h>
120 #include <stdbool.h>
121 #include <stdio.h>
122 #include <stdlib.h>
123 #include <string.h>
124
125 #define KASSERT(cond, arg) \
126 do { \
127 if (!(cond)) { \
128 printf arg; \
129 abort(); \
130 } \
131 } while (0)
132
133 static int no_alloc;
134 #define Malloc(foo) _Malloc(foo, __LINE__)
135 static void *
136 _Malloc(size_t foo, int line)
137 {
138
139 KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
140 return (calloc(foo, 1));
141 }
142 #define Free(foo) free(foo)
143
144 struct unrhdr;
145
146
147 struct mtx {
148 int state;
149 } unitmtx;
150
151 static void
152 mtx_lock(struct mtx *mp)
153 {
154 KASSERT(mp->state == 0, ("mutex already locked"));
155 mp->state = 1;
156 }
157
158 static void
159 mtx_unlock(struct mtx *mp)
160 {
161 KASSERT(mp->state == 1, ("mutex not locked"));
162 mp->state = 0;
163 }
164
165 #define MA_OWNED 9
166
167 static void
168 mtx_assert(struct mtx *mp, int flag)
169 {
170 if (flag == MA_OWNED) {
171 KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
172 }
173 }
174
175 #define CTASSERT(foo)
176 #define WITNESS_WARN(flags, lock, fmt, ...) (void)0
177
178 #endif /* USERLAND */
179
180 /*
181 * This is our basic building block.
182 *
183 * It can be used in three different ways depending on the value of the ptr
184 * element:
185 * If ptr is NULL, it represents a run of free items.
186 * If ptr points to the unrhdr it represents a run of allocated items.
187 * Otherwise it points to a bitstring of allocated items.
188 *
189 * For runs the len field is the length of the run.
190 * For bitmaps the len field is the number of items covered by the bitmap.
191 *
192 * The bitmap is the same size as struct unr to optimize memory management.
193 */
194 struct unr {
195 TAILQ_ENTRY(unr) list;
196 u_int len;
197 void *ptr;
198 };
199
200 struct unrb {
201 bitstr_t map[sizeof(struct unr) / sizeof(bitstr_t)];
202 };
203
204 CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);
205
206 /* Number of bits we can store in the bitmap */
207 #define NBITS (8 * sizeof(((struct unrb*)NULL)->map))
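
/*
 * Illustrative sketch, not part of the original file: the three encodings
 * spelled out as code.  unr_chunk_kind() is hypothetical -- the real code
 * only needs the is_bitmap() helper defined further down -- and the CTASSERT
 * records the size identity that lets a retired struct unr be recycled as
 * bitmap storage.
 */
#if 0
static const char *
unr_chunk_kind(struct unrhdr *uh, struct unr *up)
{
	if (up->ptr == NULL)
		return ("run of up->len free unit numbers");
	if (up->ptr == uh)
		return ("run of up->len allocated unit numbers");
	return ("bitmap covering up->len (<= NBITS) unit numbers");
}

CTASSERT(sizeof(struct unrb) == sizeof(struct unr));
#endif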
208
209 /* Is the unrb empty in at least the first len bits? */
210 static inline bool
211 ub_empty(struct unrb *ub, int len) {
212 int first_set;
213
214 bit_ffs(ub->map, len, &first_set);
215 return (first_set == -1);
216 }
217
218 /* Is the unrb full? That is, is the number of set elements equal to len? */
219 static inline bool
220 ub_full(struct unrb *ub, int len)
221 {
222 int first_clear;
223
224 bit_ffc(ub->map, len, &first_clear);
225 return (first_clear == -1);
226 }
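
/*
 * Illustrative sketch, not part of the original file: the bitstring(3)
 * macros this file relies on, exercised on a small map.  The stop index of
 * bit_nset()/bit_nclear() is inclusive, while bit_ffs(), bit_ffc() and
 * bit_count() take a count of bits; the find macros report -1 when no bit
 * qualifies.
 */
#if 0
static void
bitstring_demo(void)
{
	bitstr_t bit_decl(map, 16);
	int idx, cnt;

	bit_nclear(map, 0, 15);		/* clear all 16 bits */
	bit_nset(map, 2, 4);		/* set bits 2, 3 and 4 */
	bit_set(map, 9);
	bit_clear(map, 3);

	bit_ffs(map, 16, &idx);		/* idx == 2, first set bit */
	bit_ffc(map, 16, &idx);		/* idx == 0, first clear bit */
	bit_count(map, 0, 16, &cnt);	/* cnt == 3 (bits 2, 4 and 9) */
	KASSERT(bit_test(map, 9) != 0, ("bit 9 should be set"));
}
#endif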
227
228
229 #if defined(DIAGNOSTIC) || !defined(_KERNEL)
230 /*
231 * Consistency check function.
232 *
233 * Checks the internal consistency as well as we can.
234 *
235 * Called at all boundaries of this API.
236 */
237 static void
238 check_unrhdr(struct unrhdr *uh, int line)
239 {
240 struct unr *up;
241 struct unrb *ub;
242 int w;
243 u_int y, z;
244
245 y = uh->first;
246 z = 0;
247 TAILQ_FOREACH(up, &uh->head, list) {
248 z++;
249 if (up->ptr != uh && up->ptr != NULL) {
250 ub = up->ptr;
251 KASSERT (up->len <= NBITS,
252 ("UNR inconsistency: len %u max %zd (line %d)\n",
253 up->len, NBITS, line));
254 z++;
255 w = 0;
256 bit_count(ub->map, 0, up->len, &w);
257 y += w;
258 } else if (up->ptr != NULL)
259 y += up->len;
260 }
261 KASSERT (y == uh->busy,
262 ("UNR inconsistency: items %u found %u (line %d)\n",
263 uh->busy, y, line));
264 KASSERT (z == uh->alloc,
265 ("UNR inconsistency: chunks %u found %u (line %d)\n",
266 uh->alloc, z, line));
267 }
268
269 #else
270
271 static __inline void
272 check_unrhdr(struct unrhdr *uh __unused, int line __unused)
273 {
274
275 }
276
277 #endif
278
279
280 /*
281 * Element memory management: hand out the caller's pre-allocated elements,
282 * defer frees to clean_unrhdrl(), and track uh->alloc for check_unrhdr().
283 */
284
285 static __inline void *
286 new_unr(struct unrhdr *uh, void **p1, void **p2)
287 {
288 void *p;
289
290 uh->alloc++;
291 KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
292 if (*p1 != NULL) {
293 p = *p1;
294 *p1 = NULL;
295 return (p);
296 } else {
297 p = *p2;
298 *p2 = NULL;
299 return (p);
300 }
301 }
302
303 static __inline void
304 delete_unr(struct unrhdr *uh, void *ptr)
305 {
306 struct unr *up;
307
308 uh->alloc--;
309 up = ptr;
310 TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
311 }
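
/*
 * Note: a single free_unrl() or alloc_unr_specificl() call can need at most
 * two fresh elements -- splitting one run produces at most a new head chunk
 * and a new tail chunk -- which is why the unlocked wrappers further down
 * pre-allocate exactly two elements before taking the mutex and release
 * whatever new_unr() did not consume afterwards.
 */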
312
313 void
314 clean_unrhdrl(struct unrhdr *uh)
315 {
316 struct unr *up;
317
318 mtx_assert(uh->mtx, MA_OWNED);
319 while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
320 TAILQ_REMOVE(&uh->ppfree, up, list);
321 mtx_unlock(uh->mtx);
322 Free(up);
323 mtx_lock(uh->mtx);
324 }
325
326 }
327
328 void
329 clean_unrhdr(struct unrhdr *uh)
330 {
331
332 mtx_lock(uh->mtx);
333 clean_unrhdrl(uh);
334 mtx_unlock(uh->mtx);
335 }
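
/*
 * Illustrative sketch, not part of the original file: the pre-locked usage
 * promised in the header comment.  A subsystem that hands its own mutex to
 * new_unrhdr() may call alloc_unrl() while already holding that lock; the
 * postponed frees are reaped later, without the lock held, via
 * clean_unrhdr().  The "foo" names and FOO_MAXUNIT are hypothetical.
 */
#if 0
static struct mtx foo_mtx;		/* initialized elsewhere with mtx_init() */
static struct unrhdr *foo_units;	/* new_unrhdr(0, FOO_MAXUNIT, &foo_mtx) */

static int
foo_alloc_locked(void)
{

	mtx_assert(&foo_mtx, MA_OWNED);
	return (alloc_unrl(foo_units));	/* foo_mtx is not dropped */
}

static void
foo_reap(void)
{

	/* Called without foo_mtx; takes and releases it internally. */
	clean_unrhdr(foo_units);
}
#endif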
336
337 void
338 init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
339 {
340
341 KASSERT(low >= 0 && low <= high,
342 ("UNR: use error: new_unrhdr(%d, %d)", low, high));
343 if (mutex != NULL)
344 uh->mtx = mutex;
345 else
346 uh->mtx = &unitmtx;
347 TAILQ_INIT(&uh->head);
348 TAILQ_INIT(&uh->ppfree);
349 uh->low = low;
350 uh->high = high;
351 uh->first = 0;
352 uh->last = 1 + (high - low);
353 check_unrhdr(uh, __LINE__);
354 }
355
356 /*
357 * Allocate a new unrheader set.
358 *
359 * Highest and lowest valid values given as parameters.
360 */
361
362 struct unrhdr *
363 new_unrhdr(int low, int high, struct mtx *mutex)
364 {
365 struct unrhdr *uh;
366
367 uh = Malloc(sizeof *uh);
368 init_unrhdr(uh, low, high, mutex);
369 return (uh);
370 }
371
372 void
373 delete_unrhdr(struct unrhdr *uh)
374 {
375
376 check_unrhdr(uh, __LINE__);
377 KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
378 KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
379 KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
380 ("unrhdr has postponed item for free"));
381 Free(uh);
382 }
383
384 void
385 clear_unrhdr(struct unrhdr *uh)
386 {
387 struct unr *up, *uq;
388
389 KASSERT(TAILQ_EMPTY(&uh->ppfree),
390 ("unrhdr has postponed item for free"));
391 TAILQ_FOREACH_SAFE(up, &uh->head, list, uq) {
392 if (up->ptr != uh) {
393 Free(up->ptr);
394 }
395 Free(up);
396 }
397 uh->busy = 0;
398 uh->alloc = 0;
399 init_unrhdr(uh, uh->low, uh->high, uh->mtx);
400
401 check_unrhdr(uh, __LINE__);
402 }
403
404 static __inline int
405 is_bitmap(struct unrhdr *uh, struct unr *up)
406 {
407 return (up->ptr != uh && up->ptr != NULL);
408 }
409
410 /*
411 * Look for a sequence of items which can be combined into a bitmap; if
412 * multiple are present, take the one which saves the most memory.
413 *
414 * Return (1) if a sequence was found, to indicate that another call
415 * might be able to do more. Return (0) if we found no suitable sequence.
416 *
417 * NB: called from alloc_unr(), no new memory allocation allowed.
418 */
419 static int
420 optimize_unr(struct unrhdr *uh)
421 {
422 struct unr *up, *uf, *us;
423 struct unrb *ub, *ubf;
424 u_int a, l, ba;
425
426 /*
427 * Look for the run of items (if any) which when collapsed into
428 * a bitmap would save most memory.
429 */
430 us = NULL;
431 ba = 0;
432 TAILQ_FOREACH(uf, &uh->head, list) {
433 if (uf->len >= NBITS)
434 continue;
435 a = 1;
436 if (is_bitmap(uh, uf))
437 a++;
438 l = uf->len;
439 up = uf;
440 while (1) {
441 up = TAILQ_NEXT(up, list);
442 if (up == NULL)
443 break;
444 if ((up->len + l) > NBITS)
445 break;
446 a++;
447 if (is_bitmap(uh, up))
448 a++;
449 l += up->len;
450 }
451 if (a > ba) {
452 ba = a;
453 us = uf;
454 }
455 }
456 if (ba < 3)
457 return (0);
458
459 /*
460 * If the first element is not a bitmap, make it one.
461 * Trying to do so without allocating more memory complicates things
462 * a bit
463 */
464 if (!is_bitmap(uh, us)) {
465 uf = TAILQ_NEXT(us, list);
466 TAILQ_REMOVE(&uh->head, us, list);
467 a = us->len;
468 l = us->ptr == uh ? 1 : 0;
469 ub = (void *)us;
470 bit_nclear(ub->map, 0, NBITS - 1);
471 if (l)
472 bit_nset(ub->map, 0, a);
473 if (!is_bitmap(uh, uf)) {
474 if (uf->ptr == NULL)
475 bit_nclear(ub->map, a, a + uf->len - 1);
476 else
477 bit_nset(ub->map, a, a + uf->len - 1);
478 uf->ptr = ub;
479 uf->len += a;
480 us = uf;
481 } else {
482 ubf = uf->ptr;
483 for (l = 0; l < uf->len; l++, a++) {
484 if (bit_test(ubf->map, l))
485 bit_set(ub->map, a);
486 else
487 bit_clear(ub->map, a);
488 }
489 uf->len = a;
490 delete_unr(uh, uf->ptr);
491 uf->ptr = ub;
492 us = uf;
493 }
494 }
495 ub = us->ptr;
496 while (1) {
497 uf = TAILQ_NEXT(us, list);
498 if (uf == NULL)
499 return (1);
500 if (uf->len + us->len > NBITS)
501 return (1);
502 if (uf->ptr == NULL) {
503 bit_nclear(ub->map, us->len, us->len + uf->len - 1);
504 us->len += uf->len;
505 TAILQ_REMOVE(&uh->head, uf, list);
506 delete_unr(uh, uf);
507 } else if (uf->ptr == uh) {
508 bit_nset(ub->map, us->len, us->len + uf->len - 1);
509 us->len += uf->len;
510 TAILQ_REMOVE(&uh->head, uf, list);
511 delete_unr(uh, uf);
512 } else {
513 ubf = uf->ptr;
514 for (l = 0; l < uf->len; l++, us->len++) {
515 if (bit_test(ubf->map, l))
516 bit_set(ub->map, us->len);
517 else
518 bit_clear(ub->map, us->len);
519 }
520 TAILQ_REMOVE(&uh->head, uf, list);
521 delete_unr(uh, ubf);
522 delete_unr(uh, uf);
523 }
524 }
525 }
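
/*
 * Illustrative example, not part of the original file: a chunk list such as
 *
 *	[alloc 5] [free 3] [alloc 2] [free 6] [alloc 4]    (5 elements)
 *
 * covers 20 unit numbers, well below NBITS, so optimize_unr() rewrites it
 * as a single list element pointing at a bitmap
 *
 *	[bitmap, len 20: #####...##......####]
 *
 * where the bitmap storage recycles one of the old elements and the three
 * remaining elements are handed to delete_unr().  ('#' = allocated,
 * '.' = free, the notation print_unr() uses in the test driver below.)
 */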
526
527 /*
528 * See if a given unr should be collapsed with a neighbor.
529 *
530 * NB: called from alloc_unr(), no new memory allocation allowed.
531 */
532 static void
533 collapse_unr(struct unrhdr *uh, struct unr *up)
534 {
535 struct unr *upp;
536 struct unrb *ub;
537
538 /* If bitmap is all set or clear, change it to runlength */
539 if (is_bitmap(uh, up)) {
540 ub = up->ptr;
541 if (ub_full(ub, up->len)) {
542 delete_unr(uh, up->ptr);
543 up->ptr = uh;
544 } else if (ub_empty(ub, up->len)) {
545 delete_unr(uh, up->ptr);
546 up->ptr = NULL;
547 }
548 }
549
550 /* If nothing left in runlength, delete it */
551 if (up->len == 0) {
552 upp = TAILQ_PREV(up, unrhd, list);
553 if (upp == NULL)
554 upp = TAILQ_NEXT(up, list);
555 TAILQ_REMOVE(&uh->head, up, list);
556 delete_unr(uh, up);
557 up = upp;
558 }
559
560 /* If we have "hot-spot" still, merge with neighbor if possible */
561 if (up != NULL) {
562 upp = TAILQ_PREV(up, unrhd, list);
563 if (upp != NULL && up->ptr == upp->ptr) {
564 up->len += upp->len;
565 TAILQ_REMOVE(&uh->head, upp, list);
566 delete_unr(uh, upp);
567 }
568 upp = TAILQ_NEXT(up, list);
569 if (upp != NULL && up->ptr == upp->ptr) {
570 up->len += upp->len;
571 TAILQ_REMOVE(&uh->head, upp, list);
572 delete_unr(uh, upp);
573 }
574 }
575
576 /* Merge into ->first if possible */
577 upp = TAILQ_FIRST(&uh->head);
578 if (upp != NULL && upp->ptr == uh) {
579 uh->first += upp->len;
580 TAILQ_REMOVE(&uh->head, upp, list);
581 delete_unr(uh, upp);
582 if (up == upp)
583 up = NULL;
584 }
585
586 /* Merge into ->last if possible */
587 upp = TAILQ_LAST(&uh->head, unrhd);
588 if (upp != NULL && upp->ptr == NULL) {
589 uh->last += upp->len;
590 TAILQ_REMOVE(&uh->head, upp, list);
591 delete_unr(uh, upp);
592 if (up == upp)
593 up = NULL;
594 }
595
596 /* Try to make bitmaps */
597 while (optimize_unr(uh))
598 continue;
599 }
600
601 /*
602 * Allocate a free unr.
603 */
604 int
605 alloc_unrl(struct unrhdr *uh)
606 {
607 struct unr *up;
608 struct unrb *ub;
609 u_int x;
610 int y;
611
612 mtx_assert(uh->mtx, MA_OWNED);
613 check_unrhdr(uh, __LINE__);
614 x = uh->low + uh->first;
615
616 up = TAILQ_FIRST(&uh->head);
617
618 /*
619 * If we have an ideal split, just adjust the first+last
620 */
621 if (up == NULL && uh->last > 0) {
622 uh->first++;
623 uh->last--;
624 uh->busy++;
625 return (x);
626 }
627
628 /*
629 * We can always allocate from the first list element, so if we have
630 * nothing on the list, we must have run out of unit numbers.
631 */
632 if (up == NULL)
633 return (-1);
634
635 KASSERT(up->ptr != uh, ("UNR first element is allocated"));
636
637 if (up->ptr == NULL) { /* free run */
638 uh->first++;
639 up->len--;
640 } else { /* bitmap */
641 ub = up->ptr;
642 bit_ffc(ub->map, up->len, &y);
643 KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
644 bit_set(ub->map, y);
645 x += y;
646 }
647 uh->busy++;
648 collapse_unr(uh, up);
649 return (x);
650 }
651
652 int
653 alloc_unr(struct unrhdr *uh)
654 {
655 int i;
656
657 mtx_lock(uh->mtx);
658 i = alloc_unrl(uh);
659 clean_unrhdrl(uh);
660 mtx_unlock(uh->mtx);
661 return (i);
662 }
663
664 static int
665 alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
666 {
667 struct unr *up, *upn;
668 struct unrb *ub;
669 u_int i, last, tl;
670
671 mtx_assert(uh->mtx, MA_OWNED);
672
673 if (item < uh->low + uh->first || item > uh->high)
674 return (-1);
675
676 up = TAILQ_FIRST(&uh->head);
677 /* Ideal split. */
678 if (up == NULL && item - uh->low == uh->first) {
679 uh->first++;
680 uh->last--;
681 uh->busy++;
682 check_unrhdr(uh, __LINE__);
683 return (item);
684 }
685
686 i = item - uh->low - uh->first;
687
688 if (up == NULL) {
689 up = new_unr(uh, p1, p2);
690 up->ptr = NULL;
691 up->len = i;
692 TAILQ_INSERT_TAIL(&uh->head, up, list);
693 up = new_unr(uh, p1, p2);
694 up->ptr = uh;
695 up->len = 1;
696 TAILQ_INSERT_TAIL(&uh->head, up, list);
697 uh->last = uh->high - uh->low - i;
698 uh->busy++;
699 check_unrhdr(uh, __LINE__);
700 return (item);
701 } else {
702 /* Find the item which contains the unit we want to allocate. */
703 TAILQ_FOREACH(up, &uh->head, list) {
704 if (up->len > i)
705 break;
706 i -= up->len;
707 }
708 }
709
710 if (up == NULL) {
711 if (i > 0) {
712 up = new_unr(uh, p1, p2);
713 up->ptr = NULL;
714 up->len = i;
715 TAILQ_INSERT_TAIL(&uh->head, up, list);
716 }
717 up = new_unr(uh, p1, p2);
718 up->ptr = uh;
719 up->len = 1;
720 TAILQ_INSERT_TAIL(&uh->head, up, list);
721 goto done;
722 }
723
724 if (is_bitmap(uh, up)) {
725 ub = up->ptr;
726 if (bit_test(ub->map, i) == 0) {
727 bit_set(ub->map, i);
728 goto done;
729 } else
730 return (-1);
731 } else if (up->ptr == uh)
732 return (-1);
733
734 KASSERT(up->ptr == NULL,
735 ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));
736
737 /* Split off the tail end, if any. */
738 tl = up->len - (1 + i);
739 if (tl > 0) {
740 upn = new_unr(uh, p1, p2);
741 upn->ptr = NULL;
742 upn->len = tl;
743 TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
744 }
745
746 /* Split off head end, if any */
747 if (i > 0) {
748 upn = new_unr(uh, p1, p2);
749 upn->len = i;
750 upn->ptr = NULL;
751 TAILQ_INSERT_BEFORE(up, upn, list);
752 }
753 up->len = 1;
754 up->ptr = uh;
755
756 done:
757 last = uh->high - uh->low - (item - uh->low);
758 if (uh->last > last)
759 uh->last = last;
760 uh->busy++;
761 collapse_unr(uh, up);
762 check_unrhdr(uh, __LINE__);
763 return (item);
764 }
765
766 int
767 alloc_unr_specific(struct unrhdr *uh, u_int item)
768 {
769 void *p1, *p2;
770 int i;
771
772 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");
773
774 p1 = Malloc(sizeof(struct unr));
775 p2 = Malloc(sizeof(struct unr));
776
777 mtx_lock(uh->mtx);
778 i = alloc_unr_specificl(uh, item, &p1, &p2);
779 mtx_unlock(uh->mtx);
780
781 if (p1 != NULL)
782 Free(p1);
783 if (p2 != NULL)
784 Free(p2);
785
786 return (i);
787 }
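
/*
 * Illustrative sketch, not part of the original file: claiming a well-known
 * unit number first and falling back to dynamic allocation.  foo_claim_unit()
 * is hypothetical; alloc_unr_specific() returns -1 when the requested number
 * is already taken or out of range.
 */
#if 0
static int
foo_claim_unit(struct unrhdr *uh, u_int wanted)
{
	int unit;

	unit = alloc_unr_specific(uh, wanted);
	if (unit == -1)
		unit = alloc_unr(uh);	/* lowest free number instead */
	return (unit);
}
#endif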
788
789 /*
790 * Free a unr.
791 *
792 * If we can save unrs by using a bitmap, do so.
793 */
794 static void
795 free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
796 {
797 struct unr *up, *upp, *upn;
798 struct unrb *ub;
799 u_int pl;
800
801 KASSERT(item >= uh->low && item <= uh->high,
802 ("UNR: free_unr(%u) out of range [%u...%u]",
803 item, uh->low, uh->high));
804 check_unrhdr(uh, __LINE__);
805 item -= uh->low;
806 upp = TAILQ_FIRST(&uh->head);
807 /*
808 * Freeing in the ideal split case
809 */
810 if (item + 1 == uh->first && upp == NULL) {
811 uh->last++;
812 uh->first--;
813 uh->busy--;
814 check_unrhdr(uh, __LINE__);
815 return;
816 }
817 /*
818 * Freeing in the ->first section. Create a run starting at the
819 * freed item. The code below will subdivide it.
820 */
821 if (item < uh->first) {
822 up = new_unr(uh, p1, p2);
823 up->ptr = uh;
824 up->len = uh->first - item;
825 TAILQ_INSERT_HEAD(&uh->head, up, list);
826 uh->first -= up->len;
827 }
828
829 item -= uh->first;
830
831 /* Find the item which contains the unit we want to free */
832 TAILQ_FOREACH(up, &uh->head, list) {
833 if (up->len > item)
834 break;
835 item -= up->len;
836 }
837
838 /* Handle bitmap items */
839 if (is_bitmap(uh, up)) {
840 ub = up->ptr;
841
842 KASSERT(bit_test(ub->map, item) != 0,
843 ("UNR: Freeing free item %d (bitmap)\n", item));
844 bit_clear(ub->map, item);
845 uh->busy--;
846 collapse_unr(uh, up);
847 return;
848 }
849
850 KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run)\n", item));
851
852 /* Just this one left, reap it */
853 if (up->len == 1) {
854 up->ptr = NULL;
855 uh->busy--;
856 collapse_unr(uh, up);
857 return;
858 }
859
860 /* Check if we can shift the item into the previous 'free' run */
861 upp = TAILQ_PREV(up, unrhd, list);
862 if (item == 0 && upp != NULL && upp->ptr == NULL) {
863 upp->len++;
864 up->len--;
865 uh->busy--;
866 collapse_unr(uh, up);
867 return;
868 }
869
870 /* Check if we can shift the item to the next 'free' run */
871 upn = TAILQ_NEXT(up, list);
872 if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
873 upn->len++;
874 up->len--;
875 uh->busy--;
876 collapse_unr(uh, up);
877 return;
878 }
879
880 /* Split off the tail end, if any. */
881 pl = up->len - (1 + item);
882 if (pl > 0) {
883 upp = new_unr(uh, p1, p2);
884 upp->ptr = uh;
885 upp->len = pl;
886 TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
887 }
888
889 /* Split off head end, if any */
890 if (item > 0) {
891 upp = new_unr(uh, p1, p2);
892 upp->len = item;
893 upp->ptr = uh;
894 TAILQ_INSERT_BEFORE(up, upp, list);
895 }
896 up->len = 1;
897 up->ptr = NULL;
898 uh->busy--;
899 collapse_unr(uh, up);
900 }
901
902 void
903 free_unr(struct unrhdr *uh, u_int item)
904 {
905 void *p1, *p2;
906
907 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
908 p1 = Malloc(sizeof(struct unr));
909 p2 = Malloc(sizeof(struct unr));
910 mtx_lock(uh->mtx);
911 free_unrl(uh, item, &p1, &p2);
912 clean_unrhdrl(uh);
913 mtx_unlock(uh->mtx);
914 if (p1 != NULL)
915 Free(p1);
916 if (p2 != NULL)
917 Free(p2);
918 }
919
920 #ifndef _KERNEL /* USERLAND test driver */
921
922 /*
923 * Simple stochastic test driver for the above functions. The code resides
924 * here so that it can access static functions and structures.
925 */
926
927 static bool verbose;
928 #define VPRINTF(...) {if (verbose) printf(__VA_ARGS__);}
929
930 static void
931 print_unr(struct unrhdr *uh, struct unr *up)
932 {
933 u_int x;
934 struct unrb *ub;
935
936 printf(" %p len = %5u ", up, up->len);
937 if (up->ptr == NULL)
938 printf("free\n");
939 else if (up->ptr == uh)
940 printf("alloc\n");
941 else {
942 ub = up->ptr;
943 printf("bitmap [");
944 for (x = 0; x < up->len; x++) {
945 if (bit_test(ub->map, x))
946 printf("#");
947 else
948 printf(" ");
949 }
950 printf("]\n");
951 }
952 }
953
954 static void
955 print_unrhdr(struct unrhdr *uh)
956 {
957 struct unr *up;
958 u_int x;
959
960 printf(
961 "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
962 uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
963 x = uh->low + uh->first;
964 TAILQ_FOREACH(up, &uh->head, list) {
965 printf(" from = %5u", x);
966 print_unr(uh, up);
967 if (up->ptr == NULL || up->ptr == uh)
968 x += up->len;
969 else
970 x += NBITS;
971 }
972 }
973
974 static void
975 test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
976 {
977 int j;
978
979 if (a[i]) {
980 VPRINTF("F %u\n", i);
981 free_unr(uh, i);
982 a[i] = 0;
983 } else {
984 no_alloc = 1;
985 j = alloc_unr(uh);
986 if (j != -1) {
987 a[j] = 1;
988 VPRINTF("A %d\n", j);
989 }
990 no_alloc = 0;
991 }
992 }
993
994 static void
995 test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
996 {
997 int j;
998
999 j = alloc_unr_specific(uh, i);
1000 if (j == -1) {
1001 VPRINTF("F %u\n", i);
1002 a[i] = 0;
1003 free_unr(uh, i);
1004 } else {
1005 a[i] = 1;
1006 VPRINTF("A %d\n", j);
1007 }
1008 }
1009
1010 static void
1011 usage(char** argv)
1012 {
1013 printf("%s [-h] [-r REPETITIONS] [-v]\n", argv[0]);
1014 }
1015
1016 int
1017 main(int argc, char **argv)
1018 {
1019 struct unrhdr *uh;
1020 char *a;
1021 long count = 10000; /* Number of unrs to test */
1022 long reps = 1, m;
1023 int ch;
1024 u_int i, j;
1025
1026 verbose = false;
1027
1028 while ((ch = getopt(argc, argv, "hr:v")) != -1) {
1029 switch (ch) {
1030 case 'r':
1031 errno = 0;
1032 reps = strtol(optarg, NULL, 0);
1033 if (errno == ERANGE || errno == EINVAL) {
1034 usage(argv);
1035 exit(2);
1036 }
1037
1038 break;
1039 case 'v':
1040 verbose = true;
1041 break;
1042 case 'h':
1043 default:
1044 usage(argv);
1045 exit(2);
1046 }
1047
1048
1049 }
1050
1051 setbuf(stdout, NULL);
1052 uh = new_unrhdr(0, count - 1, NULL);
1053 print_unrhdr(uh);
1054
1055 a = calloc(count, sizeof(char));
1056 if (a == NULL)
1057 err(1, "calloc failed");
1058 srandomdev();
1059
1060 printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
1061 printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
1062 printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
1063 printf("NBITS %lu\n", (unsigned long)NBITS);
1064 for (m = 0; m < count * reps; m++) {
1065 j = random();
1066 i = (j >> 1) % count;
1067 #if 0
1068 if (a[i] && (j & 1))
1069 continue;
1070 #endif
1071 if ((random() & 1) != 0)
1072 test_alloc_unr(uh, i, a);
1073 else
1074 test_alloc_unr_specific(uh, i, a);
1075
1076 if (verbose)
1077 print_unrhdr(uh);
1078 check_unrhdr(uh, __LINE__);
1079 }
1080 for (i = 0; i < (u_int)count; i++) {
1081 if (a[i]) {
1082 if (verbose) {
1083 printf("C %u\n", i);
1084 print_unrhdr(uh);
1085 }
1086 free_unr(uh, i);
1087 }
1088 }
1089 print_unrhdr(uh);
1090 delete_unrhdr(uh);
1091 free(a);
1092 return (0);
1093 }
1094 #endif