FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_unit.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2004 Poul-Henning Kamp
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  * $FreeBSD$
   29  *
   30  *
   31  * Unit number allocation functions.
   32  *
   33  * These functions implement a mixed run-length/bitmap management of unit
   34  * number spaces in a very memory efficient manner.
   35  *
   36  * Allocation policy is always lowest free number first.
   37  *
   38  * A return value of -1 signals that no more unit numbers are available.
   39  *
   40  * There is no cost associated with the range of unit numbers, so unless
   41  * the resource really is finite, specify INT_MAX to new_unrhdr() and
   42  * forget about checking the return value.
   43  *
   44  * If a mutex is not provided when the unit number space is created, a
   45  * default global mutex is used.  The advantage of passing in a mutex is
   46  * that the alloc_unrl() function can be called with the mutex already
   47  * held (it will not be released by alloc_unrl()).
   48  *
   49  * The allocation function alloc_unr{l}() never sleeps (but it may block on
   50  * the mutex of course).
   51  *
   52  * Freeing a unit number may require allocating memory, and can therefore
   53  * sleep, so the free_unr() function does not come in a pre-locked variant.
   54  *
   55  * A userland test program is included.
   56  *
   57  * Memory usage is a very complex function of the exact allocation
   58  * pattern, but always very compact:
   59  *    * For the very typical case where a single unbroken run of unit
   60  *      numbers is allocated, 44 bytes are used on i386.
   61  *    * For a unit number space of 1000 units and the random pattern
   62  *      in the usermode test program included, the worst case usage
   63  *      was 252 bytes on i386 for 500 allocated and 500 free units.
   64  *    * For a unit number space of 10000 units and the random pattern
   65  *      in the usermode test program included, the worst case usage
   66  *      was 798 bytes on i386 for 5000 allocated and 5000 free units.
   67  *    * The worst case is where every other unit number is allocated and
   68  *      the rest are free.  In that case 44 + N/4 bytes are used where
   69  *      N is the number of the highest unit allocated.
   70  */
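
/*
 * Illustrative usage sketch (not part of the original file): a minimal
 * kernel consumer of this API.  The "foo" names are invented for the
 * example; the unr functions themselves are declared in <sys/systm.h>.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>

static struct unrhdr *foo_units;

static void
foo_init(void)
{
	/* Unit space 0..INT_MAX, protected by the default global mutex. */
	foo_units = new_unrhdr(0, INT_MAX, NULL);
}

static int
foo_attach(void)
{
	int unit;

	unit = alloc_unr(foo_units);	/* lowest free number, or -1 */
	if (unit == -1)
		return (-1);		/* unit number space exhausted */
	/* ... create the "foo<unit>" instance ... */
	return (unit);
}

static void
foo_detach(int unit)
{
	free_unr(foo_units, unit);	/* may sleep */
}

static void
foo_uninit(void)
{
	delete_unrhdr(foo_units);	/* all units must have been freed */
}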
   71 
   72 #include <sys/param.h>
   73 #include <sys/types.h>
   74 #include <sys/_unrhdr.h>
   75 
   76 #ifdef _KERNEL
   77 
   78 #include <sys/bitstring.h>
   79 #include <sys/malloc.h>
   80 #include <sys/kernel.h>
   81 #include <sys/systm.h>
   82 #include <sys/limits.h>
   83 #include <sys/lock.h>
   84 #include <sys/mutex.h>
   85 
   86 /*
   87  * In theory it would be smarter to allocate the individual blocks
   88  * with the zone allocator, but at this time the expectation is that
   89  * there will typically not even be enough allocations to fill a single
   90  * page, so we stick with malloc for now.
   91  */
   92 static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");
   93 
   94 #define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
   95 #define Free(foo) free(foo, M_UNIT)
   96 
   97 static struct mtx unitmtx;
   98 
   99 MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);
  100 
  101 #ifdef UNR64_LOCKED
  102 uint64_t
  103 alloc_unr64(struct unrhdr64 *unr64)
  104 {
  105         uint64_t item;
  106 
  107         mtx_lock(&unitmtx);
  108         item = unr64->counter++;
  109         mtx_unlock(&unitmtx);
  110         return (item);
  111 }
  112 #endif
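
/*
 * Illustrative sketch (not part of the original file): struct unrhdr64 is
 * a plain 64-bit counter for callers that never free unit numbers, e.g.
 * inode number generation.  The locked alloc_unr64() above is only built
 * where UNR64_LOCKED is defined; the new_unrhdr64() initializer used here
 * is assumed to be the inline provided by <sys/systm.h> in recent FreeBSD.
 */
static struct unrhdr64 foo_inos;

static void
foo_ino_init(void)
{
	new_unrhdr64(&foo_inos, 1);	/* first number handed out is 1 */
}

static uint64_t
foo_next_ino(void)
{
	return (alloc_unr64(&foo_inos));
}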
  113 
  114 #else /* ...USERLAND */
  115 
  116 #include <bitstring.h>
  117 #include <err.h>
  118 #include <errno.h>
  119 #include <getopt.h>
  120 #include <stdbool.h>
  121 #include <stdio.h>
  122 #include <stdlib.h>
  123 #include <string.h>
  124 
  125 #define KASSERT(cond, arg) \
  126         do { \
  127                 if (!(cond)) { \
  128                         printf arg; \
  129                         abort(); \
  130                 } \
  131         } while (0)
  132 
  133 static int no_alloc;
  134 #define Malloc(foo) _Malloc(foo, __LINE__)
  135 static void *
  136 _Malloc(size_t foo, int line)
  137 {
  138 
  139         KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
  140         return (calloc(foo, 1));
  141 }
  142 #define Free(foo) free(foo)
  143 
  144 struct unrhdr;
  145 
  146 #define UNR_NO_MTX      ((void *)(uintptr_t)-1)
  147 
  148 struct mtx {
  149         int     state;
  150 } unitmtx;
  151 
  152 static void
  153 mtx_lock(struct mtx *mp)
  154 {
  155         KASSERT(mp->state == 0, ("mutex already locked"));
  156         mp->state = 1;
  157 }
  158 
  159 static void
  160 mtx_unlock(struct mtx *mp)
  161 {
  162         KASSERT(mp->state == 1, ("mutex not locked"));
  163         mp->state = 0;
  164 }
  165 
  166 #define MA_OWNED        9
  167 
  168 static void
  169 mtx_assert(struct mtx *mp, int flag)
  170 {
  171         if (flag == MA_OWNED) {
  172                 KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
  173         }
  174 }
  175 
  176 #define CTASSERT(foo)
  177 #define WITNESS_WARN(flags, lock, fmt, ...)     (void)0
  178 
  179 #endif /* USERLAND */
  180 
  181 /*
  182  * This is our basic building block.
  183  *
  184  * It can be used in three different ways depending on the value of the ptr
  185  * element:
  186  *     If ptr is NULL, it represents a run of free items.
  187  *     If ptr points to the unrhdr it represents a run of allocated items.
  188  *     Otherwise it points to a bitstring of allocated items.
  189  *
  190  * For runs the len field is the length of the run.
  191  * For bitmaps the len field represents the number of allocated items.
  192  *
  193  * The bitmap is the same size as struct unr to optimize memory management.
  194  */
  195 struct unr {
  196         TAILQ_ENTRY(unr)        list;
  197         u_int                   len;
  198         void                    *ptr;
  199 };
  200 
  201 struct unrb {
  202         bitstr_t                map[sizeof(struct unr) / sizeof(bitstr_t)];
  203 };
  204 
  205 CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);
  206 
  207 /* Number of bits we can store in the bitmap */
  208 #define NBITS (8 * sizeof(((struct unrb*)NULL)->map))
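
/*
 * Worked example (layout assumptions, not from the source): struct unr is
 * two TAILQ pointers, a u_int and a void *, i.e. 8 + 8 + 4 (+ 4 padding)
 * + 8 = 32 bytes on an LP64 platform, giving NBITS = 256; on i386 it is
 * 4 + 4 + 4 + 4 = 16 bytes and NBITS = 128.  The userland test driver at
 * the bottom of this file prints the values actually in effect.
 */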
  209 
  210 /* Is the unrb empty in at least the first len bits? */
  211 static inline bool
  212 ub_empty(struct unrb *ub, int len) {
  213         int first_set;
  214 
  215         bit_ffs(ub->map, len, &first_set);
  216         return (first_set == -1);
  217 }
  218 
  219 /* Is the unrb full?  That is, is the number of set elements equal to len? */
  220 static inline bool
  221 ub_full(struct unrb *ub, int len)
  222 {
  223         int first_clear;
  224 
  225         bit_ffc(ub->map, len, &first_clear);
  226         return (first_clear == -1);
  227 }
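
/*
 * Example: for a 3-bit prefix with bits 0 and 2 set, bit_ffc(map, 3, &i)
 * stores 1 (the first clear bit), so ub_full() is false; setting bit 1 as
 * well makes bit_ffc() store -1 and ub_full() true.  ub_empty() works the
 * same way with bit_ffs() and set bits.
 */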
  228 
  229 #if defined(DIAGNOSTIC) || !defined(_KERNEL)
  230 /*
  231  * Consistency check function.
  232  *
  233  * Checks the internal consistency as well as we can.
  234  *
  235  * Called at all boundaries of this API.
  236  */
  237 static void
  238 check_unrhdr(struct unrhdr *uh, int line)
  239 {
  240         struct unr *up;
  241         struct unrb *ub;
  242         int w;
  243         u_int y, z;
  244 
  245         y = uh->first;
  246         z = 0;
  247         TAILQ_FOREACH(up, &uh->head, list) {
  248                 z++;
  249                 if (up->ptr != uh && up->ptr != NULL) {
  250                         ub = up->ptr;
  251                         KASSERT (up->len <= NBITS,
  252                             ("UNR inconsistency: len %u max %zd (line %d)\n",
  253                             up->len, NBITS, line));
  254                         z++;
  255                         w = 0;
  256                         bit_count(ub->map, 0, up->len, &w);
  257                         y += w;
  258                 } else if (up->ptr != NULL)
  259                         y += up->len;
  260         }
  261         KASSERT (y == uh->busy,
  262             ("UNR inconsistency: items %u found %u (line %d)\n",
  263             uh->busy, y, line));
  264         KASSERT (z == uh->alloc,
  265             ("UNR inconsistency: chunks %u found %u (line %d)\n",
  266             uh->alloc, z, line));
  267 }
  268 
  269 #else
  270 
  271 static __inline void
  272 check_unrhdr(struct unrhdr *uh __unused, int line __unused)
  273 {
  274 
  275 }
  276 
  277 #endif
  278 
  279 /*
  280  * Chunk management.  new_unr() hands out one of the caller's two pre-allocated
  281  * blocks; delete_unr() queues one for deferred freeing.  Both track uh->alloc.
  282  */
  283 
  284 static __inline void *
  285 new_unr(struct unrhdr *uh, void **p1, void **p2)
  286 {
  287         void *p;
  288 
  289         uh->alloc++;
  290         KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
  291         if (*p1 != NULL) {
  292                 p = *p1;
  293                 *p1 = NULL;
  294                 return (p);
  295         } else {
  296                 p = *p2;
  297                 *p2 = NULL;
  298                 return (p);
  299         }
  300 }
  301 
  302 static __inline void
  303 delete_unr(struct unrhdr *uh, void *ptr)
  304 {
  305         struct unr *up;
  306 
  307         uh->alloc--;
  308         up = ptr;
  309         TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
  310 }
  311 
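/*
 * Release the chunks queued on ->ppfree by delete_unr().  The unit number
 * space's mutex, if it has one, must be held on entry; it is dropped
 * around each Free() and re-acquired before looking at the list again.
 */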
  312 void
  313 clean_unrhdrl(struct unrhdr *uh)
  314 {
  315         struct unr *up;
  316 
  317         if (uh->mtx != NULL)
  318                 mtx_assert(uh->mtx, MA_OWNED);
  319         while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
  320                 TAILQ_REMOVE(&uh->ppfree, up, list);
  321                 if (uh->mtx != NULL)
  322                         mtx_unlock(uh->mtx);
  323                 Free(up);
  324                 if (uh->mtx != NULL)
  325                         mtx_lock(uh->mtx);
  326         }
  327 
  328 }
  329 
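/*
 * As clean_unrhdrl(), but acquires and releases the mutex (if any) itself.
 */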
  330 void
  331 clean_unrhdr(struct unrhdr *uh)
  332 {
  333 
  334         if (uh->mtx != NULL)
  335                 mtx_lock(uh->mtx);
  336         clean_unrhdrl(uh);
  337         if (uh->mtx != NULL)
  338                 mtx_unlock(uh->mtx);
  339 }
  340 
  341 void
  342 init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
  343 {
  344 
  345         KASSERT(low >= 0 && low <= high,
  346             ("UNR: use error: new_unrhdr(%d, %d)", low, high));
  347         if (mutex == UNR_NO_MTX)
  348                 uh->mtx = NULL;
  349         else if (mutex != NULL)
  350                 uh->mtx = mutex;
  351         else
  352                 uh->mtx = &unitmtx;
  353         TAILQ_INIT(&uh->head);
  354         TAILQ_INIT(&uh->ppfree);
  355         uh->low = low;
  356         uh->high = high;
  357         uh->first = 0;
  358         uh->last = 1 + (high - low);
  359         uh->busy = 0;
  360         uh->alloc = 0;
  361         check_unrhdr(uh, __LINE__);
  362 }
  363 
  364 /*
  365  * Allocate a new unrheader set.
  366  *
  367  * Highest and lowest valid values given as parameters.
  368  */
  369 
  370 struct unrhdr *
  371 new_unrhdr(int low, int high, struct mtx *mutex)
  372 {
  373         struct unrhdr *uh;
  374 
  375         uh = Malloc(sizeof *uh);
  376         init_unrhdr(uh, low, high, mutex);
  377         return (uh);
  378 }
  379 
  380 void
  381 delete_unrhdr(struct unrhdr *uh)
  382 {
  383 
  384         check_unrhdr(uh, __LINE__);
  385         KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
  386         KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
  387         KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
  388             ("unrhdr has postponed item for free"));
  389         Free(uh);
  390 }
  391 
  392 void
  393 clear_unrhdr(struct unrhdr *uh)
  394 {
  395         struct unr *up, *uq;
  396 
  397         KASSERT(TAILQ_EMPTY(&uh->ppfree),
  398             ("unrhdr has postponed item for free"));
  399         TAILQ_FOREACH_SAFE(up, &uh->head, list, uq) {
  400                 if (up->ptr != uh) {
  401                         Free(up->ptr);
  402                 }
  403                 Free(up);
  404         }
  405         uh->busy = 0;
  406         uh->alloc = 0;
  407         init_unrhdr(uh, uh->low, uh->high, uh->mtx);
  408 
  409         check_unrhdr(uh, __LINE__);
  410 }
  411 
  412 static __inline int
  413 is_bitmap(struct unrhdr *uh, struct unr *up)
  414 {
  415         return (up->ptr != uh && up->ptr != NULL);
  416 }
  417 
  418 /*
  419  * Look for sequence of items which can be combined into a bitmap, if
  420  * multiple are present, take the one which saves most memory.
  421  *
  422  * Return (1) if a sequence was found to indicate that another call
  423  * might be able to do more.  Return (0) if we found no suitable sequence.
  424  *
  425  * NB: called from alloc_unr(), no new memory allocation allowed.
  426  */
  427 static int
  428 optimize_unr(struct unrhdr *uh)
  429 {
  430         struct unr *up, *uf, *us;
  431         struct unrb *ub, *ubf;
  432         u_int a, l, ba;
  433 
  434         /*
  435          * Look for the run of items (if any) which when collapsed into
  436          * a bitmap would save most memory.
  437          */
  438         us = NULL;
  439         ba = 0;
  440         TAILQ_FOREACH(uf, &uh->head, list) {
  441                 if (uf->len >= NBITS)
  442                         continue;
  443                 a = 1;
  444                 if (is_bitmap(uh, uf))
  445                         a++;
  446                 l = uf->len;
  447                 up = uf;
  448                 while (1) {
  449                         up = TAILQ_NEXT(up, list);
  450                         if (up == NULL)
  451                                 break;
  452                         if ((up->len + l) > NBITS)
  453                                 break;
  454                         a++;
  455                         if (is_bitmap(uh, up))
  456                                 a++;
  457                         l += up->len;
  458                 }
  459                 if (a > ba) {
  460                         ba = a;
  461                         us = uf;
  462                 }
  463         }
  464         if (ba < 3)
  465                 return (0);
  466 
  467         /*
  468          * If the first element is not a bitmap, make it one.
  469          * Trying to do so without allocating more memory complicates things
  470  * a bit.
  471          */
  472         if (!is_bitmap(uh, us)) {
  473                 uf = TAILQ_NEXT(us, list);
  474                 TAILQ_REMOVE(&uh->head, us, list);
  475                 a = us->len;
  476                 l = us->ptr == uh ? 1 : 0;
  477                 ub = (void *)us;
  478                 bit_nclear(ub->map, 0, NBITS - 1);
  479                 if (l)
  480                         bit_nset(ub->map, 0, a);
  481                 if (!is_bitmap(uh, uf)) {
  482                         if (uf->ptr == NULL)
  483                                 bit_nclear(ub->map, a, a + uf->len - 1);
  484                         else
  485                                 bit_nset(ub->map, a, a + uf->len - 1);
  486                         uf->ptr = ub;
  487                         uf->len += a;
  488                         us = uf;
  489                 } else {
  490                         ubf = uf->ptr;
  491                         for (l = 0; l < uf->len; l++, a++) {
  492                                 if (bit_test(ubf->map, l))
  493                                         bit_set(ub->map, a);
  494                                 else
  495                                         bit_clear(ub->map, a);
  496                         }
  497                         uf->len = a;
  498                         delete_unr(uh, uf->ptr);
  499                         uf->ptr = ub;
  500                         us = uf;
  501                 }
  502         }
  503         ub = us->ptr;
  504         while (1) {
  505                 uf = TAILQ_NEXT(us, list);
  506                 if (uf == NULL)
  507                         return (1);
  508                 if (uf->len + us->len > NBITS)
  509                         return (1);
  510                 if (uf->ptr == NULL) {
  511                         bit_nclear(ub->map, us->len, us->len + uf->len - 1);
  512                         us->len += uf->len;
  513                         TAILQ_REMOVE(&uh->head, uf, list);
  514                         delete_unr(uh, uf);
  515                 } else if (uf->ptr == uh) {
  516                         bit_nset(ub->map, us->len, us->len + uf->len - 1);
  517                         us->len += uf->len;
  518                         TAILQ_REMOVE(&uh->head, uf, list);
  519                         delete_unr(uh, uf);
  520                 } else {
  521                         ubf = uf->ptr;
  522                         for (l = 0; l < uf->len; l++, us->len++) {
  523                                 if (bit_test(ubf->map, l))
  524                                         bit_set(ub->map, us->len);
  525                                 else
  526                                         bit_clear(ub->map, us->len);
  527                         }
  528                         TAILQ_REMOVE(&uh->head, uf, list);
  529                         delete_unr(uh, ubf);
  530                         delete_unr(uh, uf);
  531                 }
  532         }
  533 }
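
/*
 * Illustration (layout invented for the example): three adjacent small
 * chunks such as
 *
 *     [allocated run, len 5][free run, len 7][allocated run, len 3]
 *
 * occupy three struct unr.  Folded into one bitmap chunk they occupy two
 * (a struct unr plus a struct unrb), which is why optimize_unr() requires
 * the candidate sequence to occupy at least three structures (ba >= 3)
 * before collapsing it.
 */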
  534 
  535 /*
  536  * See if a given unr should be collapsed with a neighbor.
  537  *
  538  * NB: called from alloc_unr(), no new memory allocation allowed.
  539  */
  540 static void
  541 collapse_unr(struct unrhdr *uh, struct unr *up)
  542 {
  543         struct unr *upp;
  544         struct unrb *ub;
  545 
  546         /* If bitmap is all set or clear, change it to runlength */
  547         if (is_bitmap(uh, up)) {
  548                 ub = up->ptr;
  549                 if (ub_full(ub, up->len)) {
  550                         delete_unr(uh, up->ptr);
  551                         up->ptr = uh;
  552                 } else if (ub_empty(ub, up->len)) {
  553                         delete_unr(uh, up->ptr);
  554                         up->ptr = NULL;
  555                 }
  556         }
  557 
  558         /* If nothing left in runlength, delete it */
  559         if (up->len == 0) {
  560                 upp = TAILQ_PREV(up, unrhd, list);
  561                 if (upp == NULL)
  562                         upp = TAILQ_NEXT(up, list);
  563                 TAILQ_REMOVE(&uh->head, up, list);
  564                 delete_unr(uh, up);
  565                 up = upp;
  566         }
  567 
  568         /* If we have "hot-spot" still, merge with neighbor if possible */
  569         if (up != NULL) {
  570                 upp = TAILQ_PREV(up, unrhd, list);
  571                 if (upp != NULL && up->ptr == upp->ptr) {
  572                         up->len += upp->len;
  573                         TAILQ_REMOVE(&uh->head, upp, list);
  574                         delete_unr(uh, upp);
  575                         }
  576                 upp = TAILQ_NEXT(up, list);
  577                 if (upp != NULL && up->ptr == upp->ptr) {
  578                         up->len += upp->len;
  579                         TAILQ_REMOVE(&uh->head, upp, list);
  580                         delete_unr(uh, upp);
  581                 }
  582         }
  583 
  584         /* Merge into ->first if possible */
  585         upp = TAILQ_FIRST(&uh->head);
  586         if (upp != NULL && upp->ptr == uh) {
  587                 uh->first += upp->len;
  588                 TAILQ_REMOVE(&uh->head, upp, list);
  589                 delete_unr(uh, upp);
  590                 if (up == upp)
  591                         up = NULL;
  592         }
  593 
  594         /* Merge into ->last if possible */
  595         upp = TAILQ_LAST(&uh->head, unrhd);
  596         if (upp != NULL && upp->ptr == NULL) {
  597                 uh->last += upp->len;
  598                 TAILQ_REMOVE(&uh->head, upp, list);
  599                 delete_unr(uh, upp);
  600                 if (up == upp)
  601                         up = NULL;
  602         }
  603 
  604         /* Try to make bitmaps */
  605         while (optimize_unr(uh))
  606                 continue;
  607 }
  608 
  609 /*
  610  * Allocate a free unr.
  611  */
  612 int
  613 alloc_unrl(struct unrhdr *uh)
  614 {
  615         struct unr *up;
  616         struct unrb *ub;
  617         u_int x;
  618         int y;
  619 
  620         if (uh->mtx != NULL)
  621                 mtx_assert(uh->mtx, MA_OWNED);
  622         check_unrhdr(uh, __LINE__);
  623         x = uh->low + uh->first;
  624 
  625         up = TAILQ_FIRST(&uh->head);
  626 
  627         /*
  628          * If we have an ideal split, just adjust the first+last
  629          */
  630         if (up == NULL && uh->last > 0) {
  631                 uh->first++;
  632                 uh->last--;
  633                 uh->busy++;
  634                 return (x);
  635         }
  636 
  637         /*
  638          * We can always allocate from the first list element, so if we have
  639          * nothing on the list, we must have run out of unit numbers.
  640          */
  641         if (up == NULL)
  642                 return (-1);
  643 
  644         KASSERT(up->ptr != uh, ("UNR first element is allocated"));
  645 
  646         if (up->ptr == NULL) {  /* free run */
  647                 uh->first++;
  648                 up->len--;
  649         } else {                /* bitmap */
  650                 ub = up->ptr;
  651                 bit_ffc(ub->map, up->len, &y);
  652                 KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
  653                 bit_set(ub->map, y);
  654                 x += y;
  655         }
  656         uh->busy++;
  657         collapse_unr(uh, up);
  658         return (x);
  659 }
  660 
  661 int
  662 alloc_unr(struct unrhdr *uh)
  663 {
  664         int i;
  665 
  666         if (uh->mtx != NULL)
  667                 mtx_lock(uh->mtx);
  668         i = alloc_unrl(uh);
  669         clean_unrhdrl(uh);
  670         if (uh->mtx != NULL)
  671                 mtx_unlock(uh->mtx);
  672         return (i);
  673 }
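
/*
 * Illustrative sketch (not part of the original file): a consumer that
 * passed its own mutex to new_unrhdr() can call the pre-locked
 * alloc_unrl() while it already holds that mutex, e.g. to hand out a
 * unit number and publish the new instance under the same lock.  The
 * "bar" names are invented for the example; the headers already included
 * at the top of this file suffice.
 */
static struct mtx bar_mtx;
static struct unrhdr *bar_units;

static void
bar_setup(void)
{
	mtx_init(&bar_mtx, "bar units", NULL, MTX_DEF);
	bar_units = new_unrhdr(0, INT_MAX, &bar_mtx);
}

static int
bar_new_instance(void)
{
	int unit;

	mtx_lock(&bar_mtx);
	unit = alloc_unrl(bar_units);	/* bar_mtx stays held */
	/* ... link the new instance into a bar_mtx-protected list ... */
	clean_unrhdrl(bar_units);	/* may drop/retake bar_mtx while freeing */
	mtx_unlock(&bar_mtx);
	return (unit);
}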
  674 
  675 static int
  676 alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
  677 {
  678         struct unr *up, *upn;
  679         struct unrb *ub;
  680         u_int i, last, tl;
  681 
  682         if (uh->mtx != NULL)
  683                 mtx_assert(uh->mtx, MA_OWNED);
  684 
  685         if (item < uh->low + uh->first || item > uh->high)
  686                 return (-1);
  687 
  688         up = TAILQ_FIRST(&uh->head);
  689         /* Ideal split. */
  690         if (up == NULL && item - uh->low == uh->first) {
  691                 uh->first++;
  692                 uh->last--;
  693                 uh->busy++;
  694                 check_unrhdr(uh, __LINE__);
  695                 return (item);
  696         }
  697 
  698         i = item - uh->low - uh->first;
  699 
  700         if (up == NULL) {
  701                 up = new_unr(uh, p1, p2);
  702                 up->ptr = NULL;
  703                 up->len = i;
  704                 TAILQ_INSERT_TAIL(&uh->head, up, list);
  705                 up = new_unr(uh, p1, p2);
  706                 up->ptr = uh;
  707                 up->len = 1;
  708                 TAILQ_INSERT_TAIL(&uh->head, up, list);
  709                 uh->last = uh->high - uh->low - i;
  710                 uh->busy++;
  711                 check_unrhdr(uh, __LINE__);
  712                 return (item);
  713         } else {
  714                 /* Find the item which contains the unit we want to allocate. */
  715                 TAILQ_FOREACH(up, &uh->head, list) {
  716                         if (up->len > i)
  717                                 break;
  718                         i -= up->len;
  719                 }
  720         }
  721 
  722         if (up == NULL) {
  723                 if (i > 0) {
  724                         up = new_unr(uh, p1, p2);
  725                         up->ptr = NULL;
  726                         up->len = i;
  727                         TAILQ_INSERT_TAIL(&uh->head, up, list);
  728                 }
  729                 up = new_unr(uh, p1, p2);
  730                 up->ptr = uh;
  731                 up->len = 1;
  732                 TAILQ_INSERT_TAIL(&uh->head, up, list);
  733                 goto done;
  734         }
  735 
  736         if (is_bitmap(uh, up)) {
  737                 ub = up->ptr;
  738                 if (bit_test(ub->map, i) == 0) {
  739                         bit_set(ub->map, i);
  740                         goto done;
  741                 } else
  742                         return (-1);
  743         } else if (up->ptr == uh)
  744                 return (-1);
  745 
  746         KASSERT(up->ptr == NULL,
  747             ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));
  748 
  749         /* Split off the tail end, if any. */
  750         tl = up->len - (1 + i);
  751         if (tl > 0) {
  752                 upn = new_unr(uh, p1, p2);
  753                 upn->ptr = NULL;
  754                 upn->len = tl;
  755                 TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
  756         }
  757 
  758         /* Split off head end, if any */
  759         if (i > 0) {
  760                 upn = new_unr(uh, p1, p2);
  761                 upn->len = i;
  762                 upn->ptr = NULL;
  763                 TAILQ_INSERT_BEFORE(up, upn, list);
  764         }
  765         up->len = 1;
  766         up->ptr = uh;
  767 
  768 done:
  769         last = uh->high - uh->low - (item - uh->low);
  770         if (uh->last > last)
  771                 uh->last = last;
  772         uh->busy++;
  773         collapse_unr(uh, up);
  774         check_unrhdr(uh, __LINE__);
  775         return (item);
  776 }
  777 
  778 int
  779 alloc_unr_specific(struct unrhdr *uh, u_int item)
  780 {
  781         void *p1, *p2;
  782         int i;
  783 
  784         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");
  785 
  786         p1 = Malloc(sizeof(struct unr));
  787         p2 = Malloc(sizeof(struct unr));
  788 
  789         if (uh->mtx != NULL)
  790                 mtx_lock(uh->mtx);
  791         i = alloc_unr_specificl(uh, item, &p1, &p2);
  792         if (uh->mtx != NULL)
  793                 mtx_unlock(uh->mtx);
  794 
  795         if (p1 != NULL)
  796                 Free(p1);
  797         if (p2 != NULL)
  798                 Free(p2);
  799 
  800         return (i);
  801 }
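
/*
 * Illustrative sketch (not part of the original file): reserving a
 * preferred, well-known unit number and falling back to the lowest free
 * one.  alloc_unr_specific() returns the requested number on success and
 * -1 when it is out of range or already allocated; it may sleep while
 * pre-allocating memory.  The "baz" name is invented for the example.
 */
static int
baz_get_unit(struct unrhdr *units, u_int preferred)
{
	int unit;

	unit = alloc_unr_specific(units, preferred);
	if (unit == -1)
		unit = alloc_unr(units);	/* lowest free, or -1 */
	return (unit);
}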
  802 
  803 /*
  804  * Free a unr.
  805  *
  806  * If we can save unrs by using a bitmap, do so.
  807  */
  808 static void
  809 free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
  810 {
  811         struct unr *up, *upp, *upn;
  812         struct unrb *ub;
  813         u_int pl;
  814 
  815         KASSERT(item >= uh->low && item <= uh->high,
  816             ("UNR: free_unr(%u) out of range [%u...%u]",
  817              item, uh->low, uh->high));
  818         check_unrhdr(uh, __LINE__);
  819         item -= uh->low;
  820         upp = TAILQ_FIRST(&uh->head);
  821         /*
  822          * Freeing in the ideal split case
  823          */
  824         if (item + 1 == uh->first && upp == NULL) {
  825                 uh->last++;
  826                 uh->first--;
  827                 uh->busy--;
  828                 check_unrhdr(uh, __LINE__);
  829                 return;
  830         }
  831         /*
  832          * Freeing in the ->first section.  Create a run starting at the
  833          * freed item.  The code below will subdivide it.
  834          */
  835         if (item < uh->first) {
  836                 up = new_unr(uh, p1, p2);
  837                 up->ptr = uh;
  838                 up->len = uh->first - item;
  839                 TAILQ_INSERT_HEAD(&uh->head, up, list);
  840                 uh->first -= up->len;
  841         }
  842 
  843         item -= uh->first;
  844 
  845         /* Find the item which contains the unit we want to free */
  846         TAILQ_FOREACH(up, &uh->head, list) {
  847                 if (up->len > item)
  848                         break;
  849                 item -= up->len;
  850         }
  851 
  852         /* Handle bitmap items */
  853         if (is_bitmap(uh, up)) {
  854                 ub = up->ptr;
  855 
  856                 KASSERT(bit_test(ub->map, item) != 0,
  857                     ("UNR: Freeing free item %d (bitmap)\n", item));
  858                 bit_clear(ub->map, item);
  859                 uh->busy--;
  860                 collapse_unr(uh, up);
  861                 return;
  862         }
  863 
  864         KASSERT(up->ptr == uh, ("UNR: Freeing free item %d (run)\n", item));
  865 
  866         /* Just this one left, reap it */
  867         if (up->len == 1) {
  868                 up->ptr = NULL;
  869                 uh->busy--;
  870                 collapse_unr(uh, up);
  871                 return;
  872         }
  873 
  874         /* Check if we can shift the item into the previous 'free' run */
  875         upp = TAILQ_PREV(up, unrhd, list);
  876         if (item == 0 && upp != NULL && upp->ptr == NULL) {
  877                 upp->len++;
  878                 up->len--;
  879                 uh->busy--;
  880                 collapse_unr(uh, up);
  881                 return;
  882         }
  883 
  884         /* Check if we can shift the item to the next 'free' run */
  885         upn = TAILQ_NEXT(up, list);
  886         if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
  887                 upn->len++;
  888                 up->len--;
  889                 uh->busy--;
  890                 collapse_unr(uh, up);
  891                 return;
  892         }
  893 
  894         /* Split off the tail end, if any. */
  895         pl = up->len - (1 + item);
  896         if (pl > 0) {
  897                 upp = new_unr(uh, p1, p2);
  898                 upp->ptr = uh;
  899                 upp->len = pl;
  900                 TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
  901         }
  902 
  903         /* Split off head end, if any */
  904         if (item > 0) {
  905                 upp = new_unr(uh, p1, p2);
  906                 upp->len = item;
  907                 upp->ptr = uh;
  908                 TAILQ_INSERT_BEFORE(up, upp, list);
  909         }
  910         up->len = 1;
  911         up->ptr = NULL;
  912         uh->busy--;
  913         collapse_unr(uh, up);
  914 }
  915 
  916 void
  917 free_unr(struct unrhdr *uh, u_int item)
  918 {
  919         void *p1, *p2;
  920 
  921         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
  922         p1 = Malloc(sizeof(struct unr));
  923         p2 = Malloc(sizeof(struct unr));
  924         if (uh->mtx != NULL)
  925                 mtx_lock(uh->mtx);
  926         free_unrl(uh, item, &p1, &p2);
  927         clean_unrhdrl(uh);
  928         if (uh->mtx != NULL)
  929                 mtx_unlock(uh->mtx);
  930         if (p1 != NULL)
  931                 Free(p1);
  932         if (p2 != NULL)
  933                 Free(p2);
  934 }
  935 
  936 #ifndef _KERNEL /* USERLAND test driver */
  937 
  938 /*
  939  * Simple stochastic test driver for the above functions.  The code resides
  940  * here so that it can access static functions and structures.
  941  */
  942 
  943 static bool verbose;
  944 #define VPRINTF(...)    {if (verbose) printf(__VA_ARGS__);}
  945 
  946 static void
  947 print_unr(struct unrhdr *uh, struct unr *up)
  948 {
  949         u_int x;
  950         struct unrb *ub;
  951 
  952         printf("  %p len = %5u ", up, up->len);
  953         if (up->ptr == NULL)
  954                 printf("free\n");
  955         else if (up->ptr == uh)
  956                 printf("alloc\n");
  957         else {
  958                 ub = up->ptr;
  959                 printf("bitmap [");
  960                 for (x = 0; x < up->len; x++) {
  961                         if (bit_test(ub->map, x))
  962                                 printf("#");
  963                         else
  964                                 printf(" ");
  965                 }
  966                 printf("]\n");
  967         }
  968 }
  969 
  970 static void
  971 print_unrhdr(struct unrhdr *uh)
  972 {
  973         struct unr *up;
  974         u_int x;
  975 
  976         printf(
  977             "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
  978             uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
  979         x = uh->low + uh->first;
  980         TAILQ_FOREACH(up, &uh->head, list) {
  981                 printf("  from = %5u", x);
  982                 print_unr(uh, up);
  983                 if (up->ptr == NULL || up->ptr == uh)
  984                         x += up->len;
  985                 else
  986                         x += NBITS;
  987         }
  988 }
  989 
  990 static void
  991 test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
  992 {
  993         int j;
  994 
  995         if (a[i]) {
  996                 VPRINTF("F %u\n", i);
  997                 free_unr(uh, i);
  998                 a[i] = 0;
  999         } else {
 1000                 no_alloc = 1;
 1001                 j = alloc_unr(uh);
 1002                 if (j != -1) {
 1003                         a[j] = 1;
 1004                         VPRINTF("A %d\n", j);
 1005                 }
 1006                 no_alloc = 0;
 1007         }
 1008 }
 1009 
 1010 static void
 1011 test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
 1012 {
 1013         int j;
 1014 
 1015         j = alloc_unr_specific(uh, i);
 1016         if (j == -1) {
 1017                 VPRINTF("F %u\n", i);
 1018                 a[i] = 0;
 1019                 free_unr(uh, i);
 1020         } else {
 1021                 a[i] = 1;
 1022                 VPRINTF("A %d\n", j);
 1023         }
 1024 }
 1025 
 1026 static void
 1027 usage(char** argv)
 1028 {
 1029         printf("%s [-h] [-r REPETITIONS] [-v]\n", argv[0]);
 1030 }
 1031 
 1032 int
 1033 main(int argc, char **argv)
 1034 {
 1035         struct unrhdr *uh;
 1036         char *a;
 1037         long count = 10000;     /* Number of unrs to test */
 1038         long reps = 1, m;
 1039         int ch;
 1040         u_int i;
 1041 
 1042         verbose = false;
 1043 
 1044         while ((ch = getopt(argc, argv, "hr:v")) != -1) {
 1045                 switch (ch) {
 1046                 case 'r':
 1047                         errno = 0;
 1048                         reps = strtol(optarg, NULL, 0);
 1049                         if (errno == ERANGE || errno == EINVAL) {
 1050                                 usage(argv);
 1051                                 exit(2);
 1052                         }
 1053 
 1054                         break;
 1055                 case 'v':
 1056                         verbose = true;
 1057                         break;
 1058                 case 'h':
 1059                 default:
 1060                         usage(argv);
 1061                         exit(2);
 1062                 }
 1063         }
 1064 
 1065         setbuf(stdout, NULL);
 1066         uh = new_unrhdr(0, count - 1, NULL);
 1067         print_unrhdr(uh);
 1068 
 1069         a = calloc(count, sizeof(char));
 1070         if (a == NULL)
 1071                 err(1, "calloc failed");
 1072 
 1073         printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
 1074         printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
 1075         printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
 1076         printf("NBITS %lu\n", (unsigned long)NBITS);
 1077         for (m = 0; m < count * reps; m++) {
 1078                 i = arc4random_uniform(count);
 1079 #if 0
 1080                 if (a[i] && (j & 1))
 1081                         continue;
 1082 #endif
 1083                 if ((arc4random() & 1) != 0)
 1084                         test_alloc_unr(uh, i, a);
 1085                 else
 1086                         test_alloc_unr_specific(uh, i, a);
 1087 
 1088                 if (verbose)
 1089                         print_unrhdr(uh);
 1090                 check_unrhdr(uh, __LINE__);
 1091         }
 1092         for (i = 0; i < (u_int)count; i++) {
 1093                 if (a[i]) {
 1094                         if (verbose) {
 1095                                 printf("C %u\n", i);
 1096                                 print_unrhdr(uh);
 1097                         }
 1098                         free_unr(uh, i);
 1099                 }
 1100         }
 1101         print_unrhdr(uh);
 1102         delete_unrhdr(uh);
 1103         free(a);
 1104         return (0);
 1105 }
 1106 #endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.