FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_unit.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2004 Poul-Henning Kamp
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  * $FreeBSD$
   29  *
   30  *
   31  * Unit number allocation functions.
   32  *
   33  * These functions implement a mixed run-length/bitmap management of unit
   34  * number spaces in a very memory efficient manner.
   35  *
   36  * Allocation policy is always lowest free number first.
   37  *
   38  * A return value of -1 signals that no more unit numbers are available.
   39  *
   40  * There is no cost associated with the range of unit numbers, so unless
   41  * the resource really is finite, specify INT_MAX to new_unrhdr() and
   42  * forget about checking the return value.
   43  *
   44  * If a mutex is not provided when the unit number space is created, a
   45  * default global mutex is used.  The advantage of passing in a mutex is
   46  * that the alloc_unrl() function can be called with the mutex already
   47  * held (it will not be released by alloc_unrl()).
   48  *
   49  * The allocation function alloc_unr{l}() never sleeps (but it may block on
   50  * the mutex of course).
   51  *
   52  * Freeing a unit number may require allocating memory, and can therefore
   53  * sleep, so the free_unr() function does not come in a pre-locked variant.
   54  *
   55  * A userland test program is included.
   56  *
   57  * Memory usage is a very complex function of the exact allocation
   58  * pattern, but always very compact:
   59  *    * For the very typical case where a single unbroken run of unit
   60  *      numbers is allocated, 44 bytes are used on i386.
   61  *    * For a unit number space of 1000 units and the random pattern
   62  *      in the usermode test program included, the worst case usage
   63  *      was 252 bytes on i386 for 500 allocated and 500 free units.
   64  *    * For a unit number space of 10000 units and the random pattern
   65  *      in the usermode test program included, the worst case usage
   66  *      was 798 bytes on i386 for 5000 allocated and 5000 free units.
   67  *    * The worst case is where every other unit number is allocated and
   68  *      the rest are free.  In that case 44 + N/4 bytes are used where
   69  *      N is the number of the highest unit allocated.
   70  */
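      /*
       * Illustrative usage sketch (not part of this file; the consumer name
       * "foo_unrhdr" and the error handling are hypothetical, only the unr
       * calls are the API implemented below).  Create the space once, take
       * the lowest free unit per instance, return it on detach and tear the
       * space down once every unit has been freed:
       *
       *        static struct unrhdr *foo_unrhdr;
       *
       *        foo_unrhdr = new_unrhdr(0, INT_MAX, NULL);
       *
       *        int unit = alloc_unr(foo_unrhdr);
       *        if (unit == -1)
       *                return (ENOSPC);
       *
       *        free_unr(foo_unrhdr, unit);
       *        delete_unrhdr(foo_unrhdr);
       *
       * Passing NULL selects the default global mutex; a caller-supplied
       * mutex may be passed instead (so alloc_unrl() can be called with that
       * mutex already held), or UNR_NO_MTX to leave all serialization to the
       * caller.
       */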
   71 
   72 #include <sys/param.h>
   73 #include <sys/types.h>
   74 #include <sys/_unrhdr.h>
   75 
   76 #ifdef _KERNEL
   77 
   78 #include <sys/bitstring.h>
   79 #include <sys/malloc.h>
   80 #include <sys/kernel.h>
   81 #include <sys/systm.h>
   82 #include <sys/limits.h>
   83 #include <sys/lock.h>
   84 #include <sys/mutex.h>
   85 
   86 /*
   87  * In theory it would be smarter to allocate the individual blocks
   88  * with the zone allocator, but at this time the expectation is that
   89  * there will typically not even be enough allocations to fill a single
   90  * page, so we stick with malloc for now.
   91  */
   92 static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");
   93 
   94 #define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
   95 #define Free(foo) free(foo, M_UNIT)
   96 
   97 static struct mtx unitmtx;
   98 
   99 MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);
  100 
  101 #else /* ...USERLAND */
  102 
  103 #include <bitstring.h>
  104 #include <err.h>
  105 #include <errno.h>
  106 #include <getopt.h>
  107 #include <stdbool.h>
  108 #include <stdio.h>
  109 #include <stdlib.h>
  110 #include <string.h>
  111 
  112 #define KASSERT(cond, arg) \
  113         do { \
  114                 if (!(cond)) { \
  115                         printf arg; \
  116                         abort(); \
  117                 } \
  118         } while (0)
  119 
  120 static int no_alloc;
  121 #define Malloc(foo) _Malloc(foo, __LINE__)
  122 static void *
  123 _Malloc(size_t foo, int line)
  124 {
  125 
  126         KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
  127         return (calloc(foo, 1));
  128 }
  129 #define Free(foo) free(foo)
  130 
  131 struct unrhdr;
  132 
  133 #define UNR_NO_MTX      ((void *)(uintptr_t)-1)
  134 
  135 struct mtx {
  136         int     state;
  137 } unitmtx;
  138 
  139 static void
  140 mtx_lock(struct mtx *mp)
  141 {
  142         KASSERT(mp->state == 0, ("mutex already locked"));
  143         mp->state = 1;
  144 }
  145 
  146 static void
  147 mtx_unlock(struct mtx *mp)
  148 {
  149         KASSERT(mp->state == 1, ("mutex not locked"));
  150         mp->state = 0;
  151 }
  152 
  153 #define MA_OWNED        9
  154 
  155 static void
  156 mtx_assert(struct mtx *mp, int flag)
  157 {
  158         if (flag == MA_OWNED) {
  159                 KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
  160         }
  161 }
  162 
  163 #define CTASSERT(foo)
  164 #define WITNESS_WARN(flags, lock, fmt, ...)     (void)0
  165 
  166 #endif /* USERLAND */
  167 
  168 /*
  169  * This is our basic building block.
  170  *
  171  * It can be used in three different ways depending on the value of the ptr
  172  * element:
  173  *     If ptr is NULL, it represents a run of free items.
  174  *     If ptr points to the unrhdr it represents a run of allocated items.
  175  *     Otherwise it points to a bitstring of allocated items.
  176  *
  177  * For runs the len field is the length of the run.
  178  * For bitmaps the len field represents the number of allocated items.
  179  *
  180  * The bitmap is the same size as struct unr to optimize memory management.
  181  */
  182 struct unr {
  183         TAILQ_ENTRY(unr)        list;
  184         u_int                   len;
  185         void                    *ptr;
  186 };
  187 
  188 struct unrb {
  189         bitstr_t                map[sizeof(struct unr) / sizeof(bitstr_t)];
  190 };
  191 
  192 CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);
  193 
  194 /* Number of bits we can store in the bitmap */
  195 #define NBITS (8 * sizeof(((struct unrb*)NULL)->map))
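      /*
       * Illustrative example of the representation (hypothetical state, not
       * code): with new_unrhdr(0, 999, NULL), units 0-99 and unit 150
       * allocated and everything else free would typically be stored as
       *
       *        first = 100                   (leading allocated run, no list entry)
       *        unr { len = 50, ptr = NULL }  (units 100-149 free)
       *        unr { len = 1,  ptr = uh }    (unit 150 allocated)
       *        last = 849                    (trailing free run, no list entry)
       *
       * A heavily fragmented stretch of up to NBITS units is instead folded
       * into a single unr whose ptr points to a struct unrb bitmap.
       */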
  196 
  197 /* Is the unrb empty in at least the first len bits? */
  198 static inline bool
  199 ub_empty(struct unrb *ub, int len) {
  200         int first_set;
  201 
  202         bit_ffs(ub->map, len, &first_set);
  203         return (first_set == -1);
  204 }
  205 
  206 /* Is the unrb full?  That is, is the number of set elements equal to len? */
  207 static inline bool
  208 ub_full(struct unrb *ub, int len)
  209 {
  210         int first_clear;
  211 
  212         bit_ffc(ub->map, len, &first_clear);
  213         return (first_clear == -1);
  214 }
  215 
  216 #if defined(DIAGNOSTIC) || !defined(_KERNEL)
  217 /*
  218  * Consistency check function.
  219  *
  220  * Checks the internal consistency as well as we can.
  221  *
  222  * Called at all boundaries of this API.
  223  */
  224 static void
  225 check_unrhdr(struct unrhdr *uh, int line)
  226 {
  227         struct unr *up;
  228         struct unrb *ub;
  229         int w;
  230         u_int y, z;
  231 
  232         y = uh->first;
  233         z = 0;
  234         TAILQ_FOREACH(up, &uh->head, list) {
  235                 z++;
  236                 if (up->ptr != uh && up->ptr != NULL) {
  237                         ub = up->ptr;
  238                         KASSERT (up->len <= NBITS,
  239                             ("UNR inconsistency: len %u max %zd (line %d)\n",
  240                             up->len, NBITS, line));
  241                         z++;
  242                         w = 0;
  243                         bit_count(ub->map, 0, up->len, &w);
  244                         y += w;
  245                 } else if (up->ptr != NULL)
  246                         y += up->len;
  247         }
  248         KASSERT (y == uh->busy,
  249             ("UNR inconsistency: items %u found %u (line %d)\n",
  250             uh->busy, y, line));
  251         KASSERT (z == uh->alloc,
  252             ("UNR inconsistency: chunks %u found %u (line %d)\n",
  253             uh->alloc, z, line));
  254 }
  255 
  256 #else
  257 
  258 static __inline void
  259 check_unrhdr(struct unrhdr *uh __unused, int line __unused)
  260 {
  261 
  262 }
  263 
  264 #endif
  265 
  266 /*
  267  * Chunk management.  new_unr() hands out a chunk the caller preallocated;
  268  * delete_unr() queues chunks on ->ppfree.  Both keep ->alloc for check_unrhdr().
  269  */
  270 
  271 static __inline void *
  272 new_unr(struct unrhdr *uh, void **p1, void **p2)
  273 {
  274         void *p;
  275 
  276         uh->alloc++;
  277         KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
  278         if (*p1 != NULL) {
  279                 p = *p1;
  280                 *p1 = NULL;
  281                 return (p);
  282         } else {
  283                 p = *p2;
  284                 *p2 = NULL;
  285                 return (p);
  286         }
  287 }
  288 
  289 static __inline void
  290 delete_unr(struct unrhdr *uh, void *ptr)
  291 {
  292         struct unr *up;
  293 
  294         uh->alloc--;
  295         up = ptr;
  296         TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
  297 }
  298 
  299 void
  300 clean_unrhdrl(struct unrhdr *uh)
  301 {
  302         struct unr *up;
  303 
  304         if (uh->mtx != NULL)
  305                 mtx_assert(uh->mtx, MA_OWNED);
  306         while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
  307                 TAILQ_REMOVE(&uh->ppfree, up, list);
  308                 if (uh->mtx != NULL)
  309                         mtx_unlock(uh->mtx);
  310                 Free(up);
  311                 if (uh->mtx != NULL)
  312                         mtx_lock(uh->mtx);
  313         }
  314 
  315 }
  316 
  317 void
  318 clean_unrhdr(struct unrhdr *uh)
  319 {
  320 
  321         if (uh->mtx != NULL)
  322                 mtx_lock(uh->mtx);
  323         clean_unrhdrl(uh);
  324         if (uh->mtx != NULL)
  325                 mtx_unlock(uh->mtx);
  326 }
  327 
  328 void
  329 init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
  330 {
  331 
  332         KASSERT(low >= 0 && low <= high,
  333             ("UNR: use error: new_unrhdr(%d, %d)", low, high));
  334         if (mutex == UNR_NO_MTX)
  335                 uh->mtx = NULL;
  336         else if (mutex != NULL)
  337                 uh->mtx = mutex;
  338         else
  339                 uh->mtx = &unitmtx;
  340         TAILQ_INIT(&uh->head);
  341         TAILQ_INIT(&uh->ppfree);
  342         uh->low = low;
  343         uh->high = high;
  344         uh->first = 0;
  345         uh->last = 1 + (high - low);
  346         uh->busy = 0;
  347         uh->alloc = 0;
  348         check_unrhdr(uh, __LINE__);
  349 }
  350 
  351 /*
  352  * Allocate a new unrheader set.
  353  *
  354  * Highest and lowest valid values given as parameters.
  355  */
  356 
  357 struct unrhdr *
  358 new_unrhdr(int low, int high, struct mtx *mutex)
  359 {
  360         struct unrhdr *uh;
  361 
  362         uh = Malloc(sizeof *uh);
  363         init_unrhdr(uh, low, high, mutex);
  364         return (uh);
  365 }
  366 
  367 void
  368 delete_unrhdr(struct unrhdr *uh)
  369 {
  370 
  371         check_unrhdr(uh, __LINE__);
  372         KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
  373         KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
  374         KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
  375             ("unrhdr has postponed item for free"));
  376         Free(uh);
  377 }
  378 
  379 void
  380 clear_unrhdr(struct unrhdr *uh)
  381 {
  382         struct unr *up, *uq;
  383 
  384         KASSERT(TAILQ_EMPTY(&uh->ppfree),
  385             ("unrhdr has postponed item for free"));
  386         TAILQ_FOREACH_SAFE(up, &uh->head, list, uq) {
  387                 if (up->ptr != uh) {
  388                         Free(up->ptr);
  389                 }
  390                 Free(up);
  391         }
  392         uh->busy = 0;
  393         uh->alloc = 0;
  394         init_unrhdr(uh, uh->low, uh->high, uh->mtx);
  395 
  396         check_unrhdr(uh, __LINE__);
  397 }
  398 
  399 static __inline int
  400 is_bitmap(struct unrhdr *uh, struct unr *up)
  401 {
  402         return (up->ptr != uh && up->ptr != NULL);
  403 }
  404 
  405 /*
  406  * Look for a sequence of items which can be combined into a bitmap; if
  407  * multiple are present, take the one which saves the most memory.
  408  *
  409  * Return (1) if a sequence was found to indicate that another call
  410  * might be able to do more.  Return (0) if we found no suitable sequence.
  411  *
  412  * NB: called from alloc_unr(), no new memory allocation allowed.
  413  */
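      /*
       * Illustrative worked example (hypothetical lengths; NBITS is assumed
       * to be comfortably larger): a fragmented stretch of
       *
       *        free(3) - allocated(2) - free(1) - allocated(4)
       *
       * costs four struct unr entries, so the scan below selects it (a == 4).
       * The first entry's memory is reused as the struct unrb, the second
       * becomes the surviving unr (len = 10) pointing at it with bits 3-4
       * and 6-9 set, and the remaining two entries are released via
       * delete_unr().
       */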
  414 static int
  415 optimize_unr(struct unrhdr *uh)
  416 {
  417         struct unr *up, *uf, *us;
  418         struct unrb *ub, *ubf;
  419         u_int a, l, ba;
  420 
  421         /*
  422          * Look for the run of items (if any) which when collapsed into
  423          * a bitmap would save most memory.
  424          */
  425         us = NULL;
  426         ba = 0;
  427         TAILQ_FOREACH(uf, &uh->head, list) {
  428                 if (uf->len >= NBITS)
  429                         continue;
  430                 a = 1;
  431                 if (is_bitmap(uh, uf))
  432                         a++;
  433                 l = uf->len;
  434                 up = uf;
  435                 while (1) {
  436                         up = TAILQ_NEXT(up, list);
  437                         if (up == NULL)
  438                                 break;
  439                         if ((up->len + l) > NBITS)
  440                                 break;
  441                         a++;
  442                         if (is_bitmap(uh, up))
  443                                 a++;
  444                         l += up->len;
  445                 }
  446                 if (a > ba) {
  447                         ba = a;
  448                         us = uf;
  449                 }
  450         }
  451         if (ba < 3)
  452                 return (0);
  453 
  454         /*
  455          * If the first element is not a bitmap, make it one.
  456          * Trying to do so without allocating more memory complicates things
  457          * a bit
  458          */
  459         if (!is_bitmap(uh, us)) {
  460                 uf = TAILQ_NEXT(us, list);
  461                 TAILQ_REMOVE(&uh->head, us, list);
  462                 a = us->len;
  463                 l = us->ptr == uh ? 1 : 0;
  464                 ub = (void *)us;
  465                 bit_nclear(ub->map, 0, NBITS - 1);
  466                 if (l)
  467                         bit_nset(ub->map, 0, a);
  468                 if (!is_bitmap(uh, uf)) {
  469                         if (uf->ptr == NULL)
  470                                 bit_nclear(ub->map, a, a + uf->len - 1);
  471                         else
  472                                 bit_nset(ub->map, a, a + uf->len - 1);
  473                         uf->ptr = ub;
  474                         uf->len += a;
  475                         us = uf;
  476                 } else {
  477                         ubf = uf->ptr;
  478                         for (l = 0; l < uf->len; l++, a++) {
  479                                 if (bit_test(ubf->map, l))
  480                                         bit_set(ub->map, a);
  481                                 else
  482                                         bit_clear(ub->map, a);
  483                         }
  484                         uf->len = a;
  485                         delete_unr(uh, uf->ptr);
  486                         uf->ptr = ub;
  487                         us = uf;
  488                 }
  489         }
  490         ub = us->ptr;
  491         while (1) {
  492                 uf = TAILQ_NEXT(us, list);
  493                 if (uf == NULL)
  494                         return (1);
  495                 if (uf->len + us->len > NBITS)
  496                         return (1);
  497                 if (uf->ptr == NULL) {
  498                         bit_nclear(ub->map, us->len, us->len + uf->len - 1);
  499                         us->len += uf->len;
  500                         TAILQ_REMOVE(&uh->head, uf, list);
  501                         delete_unr(uh, uf);
  502                 } else if (uf->ptr == uh) {
  503                         bit_nset(ub->map, us->len, us->len + uf->len - 1);
  504                         us->len += uf->len;
  505                         TAILQ_REMOVE(&uh->head, uf, list);
  506                         delete_unr(uh, uf);
  507                 } else {
  508                         ubf = uf->ptr;
  509                         for (l = 0; l < uf->len; l++, us->len++) {
  510                                 if (bit_test(ubf->map, l))
  511                                         bit_set(ub->map, us->len);
  512                                 else
  513                                         bit_clear(ub->map, us->len);
  514                         }
  515                         TAILQ_REMOVE(&uh->head, uf, list);
  516                         delete_unr(uh, ubf);
  517                         delete_unr(uh, uf);
  518                 }
  519         }
  520 }
  521 
  522 /*
  523  * See if a given unr should be collapsed with a neighbor.
  524  *
  525  * NB: called from alloc_unr(), no new memory allocation allowed.
  526  */
  527 static void
  528 collapse_unr(struct unrhdr *uh, struct unr *up)
  529 {
  530         struct unr *upp;
  531         struct unrb *ub;
  532 
  533         /* If bitmap is all set or clear, change it to runlength */
  534         if (is_bitmap(uh, up)) {
  535                 ub = up->ptr;
  536                 if (ub_full(ub, up->len)) {
  537                         delete_unr(uh, up->ptr);
  538                         up->ptr = uh;
  539                 } else if (ub_empty(ub, up->len)) {
  540                         delete_unr(uh, up->ptr);
  541                         up->ptr = NULL;
  542                 }
  543         }
  544 
  545         /* If nothing left in runlength, delete it */
  546         if (up->len == 0) {
  547                 upp = TAILQ_PREV(up, unrhd, list);
  548                 if (upp == NULL)
  549                         upp = TAILQ_NEXT(up, list);
  550                 TAILQ_REMOVE(&uh->head, up, list);
  551                 delete_unr(uh, up);
  552                 up = upp;
  553         }
  554 
  555         /* If we still have a "hot-spot", merge with a neighbor if possible */
  556         if (up != NULL) {
  557                 upp = TAILQ_PREV(up, unrhd, list);
  558                 if (upp != NULL && up->ptr == upp->ptr) {
  559                         up->len += upp->len;
  560                         TAILQ_REMOVE(&uh->head, upp, list);
  561                         delete_unr(uh, upp);
  562                 }
  563                 upp = TAILQ_NEXT(up, list);
  564                 if (upp != NULL && up->ptr == upp->ptr) {
  565                         up->len += upp->len;
  566                         TAILQ_REMOVE(&uh->head, upp, list);
  567                         delete_unr(uh, upp);
  568                 }
  569         }
  570 
  571         /* Merge into ->first if possible */
  572         upp = TAILQ_FIRST(&uh->head);
  573         if (upp != NULL && upp->ptr == uh) {
  574                 uh->first += upp->len;
  575                 TAILQ_REMOVE(&uh->head, upp, list);
  576                 delete_unr(uh, upp);
  577                 if (up == upp)
  578                         up = NULL;
  579         }
  580 
  581         /* Merge into ->last if possible */
  582         upp = TAILQ_LAST(&uh->head, unrhd);
  583         if (upp != NULL && upp->ptr == NULL) {
  584                 uh->last += upp->len;
  585                 TAILQ_REMOVE(&uh->head, upp, list);
  586                 delete_unr(uh, upp);
  587                 if (up == upp)
  588                         up = NULL;
  589         }
  590 
  591         /* Try to make bitmaps */
  592         while (optimize_unr(uh))
  593                 continue;
  594 }
  595 
  596 /*
  597  * Allocate a free unr.
  598  */
  599 int
  600 alloc_unrl(struct unrhdr *uh)
  601 {
  602         struct unr *up;
  603         struct unrb *ub;
  604         u_int x;
  605         int y;
  606 
  607         if (uh->mtx != NULL)
  608                 mtx_assert(uh->mtx, MA_OWNED);
  609         check_unrhdr(uh, __LINE__);
  610         x = uh->low + uh->first;
  611 
  612         up = TAILQ_FIRST(&uh->head);
  613 
  614         /*
  615          * If we have an ideal split, just adjust the first+last
  616          */
  617         if (up == NULL && uh->last > 0) {
  618                 uh->first++;
  619                 uh->last--;
  620                 uh->busy++;
  621                 return (x);
  622         }
  623 
  624         /*
  625          * We can always allocate from the first list element, so if we have
  626          * nothing on the list, we must have run out of unit numbers.
  627          */
  628         if (up == NULL)
  629                 return (-1);
  630 
  631         KASSERT(up->ptr != uh, ("UNR first element is allocated"));
  632 
  633         if (up->ptr == NULL) {  /* free run */
  634                 uh->first++;
  635                 up->len--;
  636         } else {                /* bitmap */
  637                 ub = up->ptr;
  638                 bit_ffc(ub->map, up->len, &y);
  639                 KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
  640                 bit_set(ub->map, y);
  641                 x += y;
  642         }
  643         uh->busy++;
  644         collapse_unr(uh, up);
  645         return (x);
  646 }
  647 
  648 int
  649 alloc_unr(struct unrhdr *uh)
  650 {
  651         int i;
  652 
  653         if (uh->mtx != NULL)
  654                 mtx_lock(uh->mtx);
  655         i = alloc_unrl(uh);
  656         clean_unrhdrl(uh);
  657         if (uh->mtx != NULL)
  658                 mtx_unlock(uh->mtx);
  659         return (i);
  660 }
  661 
  662 static int
  663 alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
  664 {
  665         struct unr *up, *upn;
  666         struct unrb *ub;
  667         u_int i, last, tl;
  668 
  669         if (uh->mtx != NULL)
  670                 mtx_assert(uh->mtx, MA_OWNED);
  671 
  672         if (item < uh->low + uh->first || item > uh->high)
  673                 return (-1);
  674 
  675         up = TAILQ_FIRST(&uh->head);
  676         /* Ideal split. */
  677         if (up == NULL && item - uh->low == uh->first) {
  678                 uh->first++;
  679                 uh->last--;
  680                 uh->busy++;
  681                 check_unrhdr(uh, __LINE__);
  682                 return (item);
  683         }
  684 
  685         i = item - uh->low - uh->first;
  686 
  687         if (up == NULL) {
  688                 up = new_unr(uh, p1, p2);
  689                 up->ptr = NULL;
  690                 up->len = i;
  691                 TAILQ_INSERT_TAIL(&uh->head, up, list);
  692                 up = new_unr(uh, p1, p2);
  693                 up->ptr = uh;
  694                 up->len = 1;
  695                 TAILQ_INSERT_TAIL(&uh->head, up, list);
  696                 uh->last = uh->high - uh->low - i;
  697                 uh->busy++;
  698                 check_unrhdr(uh, __LINE__);
  699                 return (item);
  700         } else {
  701                 /* Find the item which contains the unit we want to allocate. */
  702                 TAILQ_FOREACH(up, &uh->head, list) {
  703                         if (up->len > i)
  704                                 break;
  705                         i -= up->len;
  706                 }
  707         }
  708 
  709         if (up == NULL) {
  710                 if (i > 0) {
  711                         up = new_unr(uh, p1, p2);
  712                         up->ptr = NULL;
  713                         up->len = i;
  714                         TAILQ_INSERT_TAIL(&uh->head, up, list);
  715                 }
  716                 up = new_unr(uh, p1, p2);
  717                 up->ptr = uh;
  718                 up->len = 1;
  719                 TAILQ_INSERT_TAIL(&uh->head, up, list);
  720                 goto done;
  721         }
  722 
  723         if (is_bitmap(uh, up)) {
  724                 ub = up->ptr;
  725                 if (bit_test(ub->map, i) == 0) {
  726                         bit_set(ub->map, i);
  727                         goto done;
  728                 } else
  729                         return (-1);
  730         } else if (up->ptr == uh)
  731                 return (-1);
  732 
  733         KASSERT(up->ptr == NULL,
  734             ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));
  735 
  736         /* Split off the tail end, if any. */
  737         tl = up->len - (1 + i);
  738         if (tl > 0) {
  739                 upn = new_unr(uh, p1, p2);
  740                 upn->ptr = NULL;
  741                 upn->len = tl;
  742                 TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
  743         }
  744 
  745         /* Split off head end, if any */
  746         if (i > 0) {
  747                 upn = new_unr(uh, p1, p2);
  748                 upn->len = i;
  749                 upn->ptr = NULL;
  750                 TAILQ_INSERT_BEFORE(up, upn, list);
  751         }
  752         up->len = 1;
  753         up->ptr = uh;
  754 
  755 done:
  756         last = uh->high - uh->low - (item - uh->low);
  757         if (uh->last > last)
  758                 uh->last = last;
  759         uh->busy++;
  760         collapse_unr(uh, up);
  761         check_unrhdr(uh, __LINE__);
  762         return (item);
  763 }
  764 
  765 int
  766 alloc_unr_specific(struct unrhdr *uh, u_int item)
  767 {
  768         void *p1, *p2;
  769         int i;
  770 
  771         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");
  772 
  773         p1 = Malloc(sizeof(struct unr));
  774         p2 = Malloc(sizeof(struct unr));
  775 
  776         if (uh->mtx != NULL)
  777                 mtx_lock(uh->mtx);
  778         i = alloc_unr_specificl(uh, item, &p1, &p2);
  779         if (uh->mtx != NULL)
  780                 mtx_unlock(uh->mtx);
  781 
  782         if (p1 != NULL)
  783                 Free(p1);
  784         if (p2 != NULL)
  785                 Free(p2);
  786 
  787         return (i);
  788 }
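      /*
       * Illustrative sketch (hypothetical caller, not part of this file):
       * reserving a well-known unit before handing out dynamic ones.
       *
       *        if (alloc_unr_specific(foo_unrhdr, 0) == -1)
       *                return (EEXIST);        /* unit 0 taken or out of range */
       *        unit = alloc_unr(foo_unrhdr);   /* later callers get the lowest
       *                                           free number, here >= 1 */
       */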
  789 
  790 /*
  791  * Free a unr.
  792  *
  793  * If we can save unrs by using a bitmap, do so.
  794  */
  795 static void
  796 free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
  797 {
  798         struct unr *up, *upp, *upn;
  799         struct unrb *ub;
  800         u_int pl;
  801 
  802         KASSERT(item >= uh->low && item <= uh->high,
  803             ("UNR: free_unr(%u) out of range [%u...%u]",
  804              item, uh->low, uh->high));
  805         check_unrhdr(uh, __LINE__);
  806         item -= uh->low;
  807         upp = TAILQ_FIRST(&uh->head);
  808         /*
  809          * Freeing in the ideal split case
  810          */
  811         if (item + 1 == uh->first && upp == NULL) {
  812                 uh->last++;
  813                 uh->first--;
  814                 uh->busy--;
  815                 check_unrhdr(uh, __LINE__);
  816                 return;
  817         }
  818         /*
  819          * Freeing in the ->first section.  Create a run starting at the
  820          * freed item.  The code below will subdivide it.
  821          */
  822         if (item < uh->first) {
  823                 up = new_unr(uh, p1, p2);
  824                 up->ptr = uh;
  825                 up->len = uh->first - item;
  826                 TAILQ_INSERT_HEAD(&uh->head, up, list);
  827                 uh->first -= up->len;
  828         }
  829 
  830         item -= uh->first;
  831 
  832         /* Find the item which contains the unit we want to free */
  833         TAILQ_FOREACH(up, &uh->head, list) {
  834                 if (up->len > item)
  835                         break;
  836                 item -= up->len;
  837         }
  838 
  839         /* Handle bitmap items */
  840         if (is_bitmap(uh, up)) {
  841                 ub = up->ptr;
  842 
  843                 KASSERT(bit_test(ub->map, item) != 0,
  844                     ("UNR: Freeing free item %d (bitmap)\n", item));
  845                 bit_clear(ub->map, item);
  846                 uh->busy--;
  847                 collapse_unr(uh, up);
  848                 return;
  849         }
  850 
  851         KASSERT(up->ptr == uh, ("UNR: Freeing free item %d (run)\n", item));
  852 
  853         /* Just this one left, reap it */
  854         if (up->len == 1) {
  855                 up->ptr = NULL;
  856                 uh->busy--;
  857                 collapse_unr(uh, up);
  858                 return;
  859         }
  860 
  861         /* Check if we can shift the item into the previous 'free' run */
  862         upp = TAILQ_PREV(up, unrhd, list);
  863         if (item == 0 && upp != NULL && upp->ptr == NULL) {
  864                 upp->len++;
  865                 up->len--;
  866                 uh->busy--;
  867                 collapse_unr(uh, up);
  868                 return;
  869         }
  870 
  871         /* Check if we can shift the item to the next 'free' run */
  872         upn = TAILQ_NEXT(up, list);
  873         if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
  874                 upn->len++;
  875                 up->len--;
  876                 uh->busy--;
  877                 collapse_unr(uh, up);
  878                 return;
  879         }
  880 
  881         /* Split off the tail end, if any. */
  882         pl = up->len - (1 + item);
  883         if (pl > 0) {
  884                 upp = new_unr(uh, p1, p2);
  885                 upp->ptr = uh;
  886                 upp->len = pl;
  887                 TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
  888         }
  889 
  890         /* Split off head end, if any */
  891         if (item > 0) {
  892                 upp = new_unr(uh, p1, p2);
  893                 upp->len = item;
  894                 upp->ptr = uh;
  895                 TAILQ_INSERT_BEFORE(up, upp, list);
  896         }
  897         up->len = 1;
  898         up->ptr = NULL;
  899         uh->busy--;
  900         collapse_unr(uh, up);
  901 }
  902 
  903 void
  904 free_unr(struct unrhdr *uh, u_int item)
  905 {
  906         void *p1, *p2;
  907 
  908         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
  909         p1 = Malloc(sizeof(struct unr));
  910         p2 = Malloc(sizeof(struct unr));
  911         if (uh->mtx != NULL)
  912                 mtx_lock(uh->mtx);
  913         free_unrl(uh, item, &p1, &p2);
  914         clean_unrhdrl(uh);
  915         if (uh->mtx != NULL)
  916                 mtx_unlock(uh->mtx);
  917         if (p1 != NULL)
  918                 Free(p1);
  919         if (p2 != NULL)
  920                 Free(p2);
  921 }
  922 
  923 #ifndef _KERNEL /* USERLAND test driver */
  924 
  925 /*
  926  * Simple stochastic test driver for the above functions.  The code resides
  927  * here so that it can access static functions and structures.
  928  */
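      /*
       * One plausible way to build and run it on FreeBSD (the command lines
       * are an assumption, not taken from any Makefile, and rely on the
       * installed sys/_unrhdr.h header):
       *
       *        cc -O2 -o unrtest subr_unit.c
       *        ./unrtest -r 10 -v
       *
       * -r sets how many passes of "count" random operations are made and
       * -v prints the unrhdr state after every operation.
       */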
  929 
  930 static bool verbose;
  931 #define VPRINTF(...)    {if (verbose) printf(__VA_ARGS__);}
  932 
  933 static void
  934 print_unr(struct unrhdr *uh, struct unr *up)
  935 {
  936         u_int x;
  937         struct unrb *ub;
  938 
  939         printf("  %p len = %5u ", up, up->len);
  940         if (up->ptr == NULL)
  941                 printf("free\n");
  942         else if (up->ptr == uh)
  943                 printf("alloc\n");
  944         else {
  945                 ub = up->ptr;
  946                 printf("bitmap [");
  947                 for (x = 0; x < up->len; x++) {
  948                         if (bit_test(ub->map, x))
  949                                 printf("#");
  950                         else
  951                                 printf(" ");
  952                 }
  953                 printf("]\n");
  954         }
  955 }
  956 
  957 static void
  958 print_unrhdr(struct unrhdr *uh)
  959 {
  960         struct unr *up;
  961         u_int x;
  962 
  963         printf(
  964             "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
  965             uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
  966         x = uh->low + uh->first;
  967         TAILQ_FOREACH(up, &uh->head, list) {
  968                 printf("  from = %5u", x);
  969                 print_unr(uh, up);
  970                 if (up->ptr == NULL || up->ptr == uh)
  971                         x += up->len;
  972                 else
  973                         x += NBITS;
  974         }
  975 }
  976 
  977 static void
  978 test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
  979 {
  980         int j;
  981 
  982         if (a[i]) {
  983                 VPRINTF("F %u\n", i);
  984                 free_unr(uh, i);
  985                 a[i] = 0;
  986         } else {
  987                 no_alloc = 1;
  988                 j = alloc_unr(uh);
  989                 if (j != -1) {
  990                         a[j] = 1;
  991                         VPRINTF("A %d\n", j);
  992                 }
  993                 no_alloc = 0;
  994         }
  995 }
  996 
  997 static void
  998 test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
  999 {
 1000         int j;
 1001 
 1002         j = alloc_unr_specific(uh, i);
 1003         if (j == -1) {
 1004                 VPRINTF("F %u\n", i);
 1005                 a[i] = 0;
 1006                 free_unr(uh, i);
 1007         } else {
 1008                 a[i] = 1;
 1009                 VPRINTF("A %d\n", j);
 1010         }
 1011 }
 1012 
 1013 static void
 1014 usage(char** argv)
 1015 {
 1016         printf("%s [-h] [-r REPETITIONS] [-v]\n", argv[0]);
 1017 }
 1018 
 1019 int
 1020 main(int argc, char **argv)
 1021 {
 1022         struct unrhdr *uh;
 1023         char *a;
 1024         long count = 10000;     /* Number of unrs to test */
 1025         long reps = 1, m;
 1026         int ch;
 1027         u_int i;
 1028 
 1029         verbose = false;
 1030 
 1031         while ((ch = getopt(argc, argv, "hr:v")) != -1) {
 1032                 switch (ch) {
 1033                 case 'r':
 1034                         errno = 0;
 1035                         reps = strtol(optarg, NULL, 0);
 1036                         if (errno == ERANGE || errno == EINVAL) {
 1037                                 usage(argv);
 1038                                 exit(2);
 1039                         }
 1040 
 1041                         break;
 1042                 case 'v':
 1043                         verbose = true;
 1044                         break;
 1045                 case 'h':
 1046                 default:
 1047                         usage(argv);
 1048                         exit(2);
 1049                 }
 1050         }
 1051 
 1052         setbuf(stdout, NULL);
 1053         uh = new_unrhdr(0, count - 1, NULL);
 1054         print_unrhdr(uh);
 1055 
 1056         a = calloc(count, sizeof(char));
 1057         if (a == NULL)
 1058                 err(1, "calloc failed");
 1059 
 1060         printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
 1061         printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
 1062         printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
 1063         printf("NBITS %lu\n", (unsigned long)NBITS);
 1064         for (m = 0; m < count * reps; m++) {
 1065                 i = arc4random_uniform(count);
 1066 #if 0
 1067                 if (a[i] && (j & 1))
 1068                         continue;
 1069 #endif
 1070                 if ((arc4random() & 1) != 0)
 1071                         test_alloc_unr(uh, i, a);
 1072                 else
 1073                         test_alloc_unr_specific(uh, i, a);
 1074 
 1075                 if (verbose)
 1076                         print_unrhdr(uh);
 1077                 check_unrhdr(uh, __LINE__);
 1078         }
 1079         for (i = 0; i < (u_int)count; i++) {
 1080                 if (a[i]) {
 1081                         if (verbose) {
 1082                                 printf("C %u\n", i);
 1083                                 print_unrhdr(uh);
 1084                         }
 1085                         free_unr(uh, i);
 1086                 }
 1087         }
 1088         print_unrhdr(uh);
 1089         delete_unrhdr(uh);
 1090         free(a);
 1091         return (0);
 1092 }
 1093 #endif
