FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_unit.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2004 Poul-Henning Kamp
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  * $FreeBSD: releng/12.0/sys/kern/subr_unit.c 326271 2017-11-27 15:20:12Z pfg $
   29  *
   30  *
   31  * Unit number allocation functions.
   32  *
   33  * These functions implement a mixed run-length/bitmap management of unit
   34  * number spaces in a very memory-efficient manner.
   35  *
   36  * Allocation policy is always lowest free number first.
   37  *
   38  * A return value of -1 signals that no more unit numbers are available.
   39  *
   40  * There is no cost associated with the range of unit numbers, so unless
   41  * the resource really is finite, specify INT_MAX to new_unrhdr() and
   42  * forget about checking the return value.
   43  *
   44  * If a mutex is not provided when the unit number space is created, a
   45  * default global mutex is used.  The advantage of passing in a mutex is
   46  * that the alloc_unrl() function can be called with the mutex already
   47  * held (it will not be released by alloc_unrl()).
   48  *
   49  * The allocation function alloc_unr{l}() never sleeps (but it may block on
   50  * the mutex of course).
   51  *
   52  * Freeing a unit number may require allocating memory, and can therefore
   53  * sleep, so the free_unr() function does not come in a pre-locked variant.
   54  *
   55  * A userland test program is included.
   56  *
   57  * Memory usage is a very complex function of the exact allocation
   58  * pattern, but always very compact:
   59  *    * For the very typical case where a single unbroken run of unit
   60  *      numbers is allocated, 44 bytes are used on i386.
   61  *    * For a unit number space of 1000 units and the random pattern
   62  *      in the usermode test program included, the worst case usage
   63  *      was 252 bytes on i386 for 500 allocated and 500 free units.
   64  *    * For a unit number space of 10000 units and the random pattern
   65  *      in the usermode test program included, the worst case usage
   66  *      was 798 bytes on i386 for 5000 allocated and 5000 free units.
   67  *    * The worst case is where every other unit number is allocated and
   68  *      the rest are free.  In that case 44 + N/4 bytes are used, where
   69  *      N is the highest allocated unit number.
   70  */
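
/*
 * Usage sketch.  This is illustrative only: the foo_* names are hypothetical,
 * while new_unrhdr(), alloc_unr(), alloc_unr_specific(), free_unr() and
 * delete_unrhdr() are the entry points implemented below.
 *
 *	static struct unrhdr *foo_units;
 *	int unit;
 *
 *	foo_units = new_unrhdr(0, INT_MAX, NULL);   (NULL: use the global mutex)
 *	unit = alloc_unr(foo_units);                (lowest free number, or -1)
 *	unit = alloc_unr_specific(foo_units, 42);   (42 on success, or -1)
 *	...
 *	free_unr(foo_units, unit);                  (may sleep, never pre-locked)
 *	delete_unrhdr(foo_units);                   (space must be fully free)
 */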
   71 
   72 #include <sys/param.h>
   73 #include <sys/types.h>
   74 #include <sys/_unrhdr.h>
   75 
   76 #ifdef _KERNEL
   77 
   78 #include <sys/bitstring.h>
   79 #include <sys/malloc.h>
   80 #include <sys/kernel.h>
   81 #include <sys/systm.h>
   82 #include <sys/limits.h>
   83 #include <sys/lock.h>
   84 #include <sys/mutex.h>
   85 
   86 /*
   87  * In theory it would be smarter to allocate the individual blocks
   88  * with the zone allocator, but at this time the expectation is that
   89  * there will typically not even be enough allocations to fill a single
   90  * page, so we stick with malloc for now.
   91  */
   92 static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");
   93 
   94 #define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
   95 #define Free(foo) free(foo, M_UNIT)
   96 
   97 static struct mtx unitmtx;
   98 
   99 MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);
  100 
  101 #else /* ...USERLAND */
  102 
  103 #include <bitstring.h>
  104 #include <err.h>
  105 #include <errno.h>
  106 #include <getopt.h>
  107 #include <stdbool.h>
  108 #include <stdio.h>
  109 #include <stdlib.h>
  110 #include <string.h>
  111 
  112 #define KASSERT(cond, arg) \
  113         do { \
  114                 if (!(cond)) { \
  115                         printf arg; \
  116                         abort(); \
  117                 } \
  118         } while (0)
  119 
  120 static int no_alloc;
  121 #define Malloc(foo) _Malloc(foo, __LINE__)
  122 static void *
  123 _Malloc(size_t foo, int line)
  124 {
  125 
  126         KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
  127         return (calloc(foo, 1));
  128 }
  129 #define Free(foo) free(foo)
  130 
  131 struct unrhdr;
  132 
  133 
  134 struct mtx {
  135         int     state;
  136 } unitmtx;
  137 
  138 static void
  139 mtx_lock(struct mtx *mp)
  140 {
  141         KASSERT(mp->state == 0, ("mutex already locked"));
  142         mp->state = 1;
  143 }
  144 
  145 static void
  146 mtx_unlock(struct mtx *mp)
  147 {
  148         KASSERT(mp->state == 1, ("mutex not locked"));
  149         mp->state = 0;
  150 }
  151 
  152 #define MA_OWNED        9
  153 
  154 static void
  155 mtx_assert(struct mtx *mp, int flag)
  156 {
  157         if (flag == MA_OWNED) {
  158                 KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
  159         }
  160 }
  161 
  162 #define CTASSERT(foo)
  163 #define WITNESS_WARN(flags, lock, fmt, ...)     (void)0
  164 
  165 #endif /* USERLAND */
  166 
  167 /*
  168  * This is our basic building block.
  169  *
  170  * It can be used in three different ways depending on the value of the ptr
  171  * element:
  172  *     If ptr is NULL, it represents a run of free items.
  173  *     If ptr points to the unrhdr it represents a run of allocated items.
  174  *     Otherwise it points to a bitstring of allocated items.
  175  *
  176  * For runs the len field is the length of the run.
   177  * For bitmaps the len field is the number of units covered by the bitmap.
  178  *
  179  * The bitmap is the same size as struct unr to optimize memory management.
  180  */
  181 struct unr {
  182         TAILQ_ENTRY(unr)        list;
  183         u_int                   len;
  184         void                    *ptr;
  185 };
  186 
  187 struct unrb {
  188         bitstr_t                map[sizeof(struct unr) / sizeof(bitstr_t)];
  189 };
  190 
  191 CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);
  192 
  193 /* Number of bits we can store in the bitmap */
  194 #define NBITS (8 * sizeof(((struct unrb*)NULL)->map))
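
/*
 * Illustration of the encoding (a hedged sketch, not a dump of a real
 * allocation state): for a space created with new_unrhdr(0, 999, NULL),
 * uh->first leading allocated units and uh->last trailing free units are
 * kept out of the list entirely; only the mixed region in between needs
 * struct unr chunks, e.g.:
 *
 *	uh->first = 3			units 0..2 allocated, no list entry
 *	{ ptr = NULL,  len = 150 }	units 3..152 free (a run)
 *	{ ptr = uh,    len = 300 }	units 153..452 allocated (a run)
 *	{ ptr = unrb,  len = 47 }	units 453..499, set bits = allocated
 *	uh->last = 500			units 500..999 free, no list entry
 */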
  195 
   196 /* Are the first len bits of the unrb all clear? */
  197 static inline bool
  198 ub_empty(struct unrb *ub, int len) {
  199         int first_set;
  200 
  201         bit_ffs(ub->map, len, &first_set);
  202         return (first_set == -1);
  203 }
  204 
  205 /* Is the unrb full?  That is, is the number of set elements equal to len? */
  206 static inline bool
  207 ub_full(struct unrb *ub, int len)
  208 {
  209         int first_clear;
  210 
  211         bit_ffc(ub->map, len, &first_clear);
  212         return (first_clear == -1);
  213 }
  214 
  215 
  216 #if defined(DIAGNOSTIC) || !defined(_KERNEL)
  217 /*
  218  * Consistency check function.
  219  *
  220  * Checks the internal consistency as well as we can.
  221  *
  222  * Called at all boundaries of this API.
  223  */
  224 static void
  225 check_unrhdr(struct unrhdr *uh, int line)
  226 {
  227         struct unr *up;
  228         struct unrb *ub;
  229         int w;
  230         u_int y, z;
  231 
  232         y = uh->first;
  233         z = 0;
  234         TAILQ_FOREACH(up, &uh->head, list) {
  235                 z++;
  236                 if (up->ptr != uh && up->ptr != NULL) {
  237                         ub = up->ptr;
  238                         KASSERT (up->len <= NBITS,
  239                             ("UNR inconsistency: len %u max %zd (line %d)\n",
  240                             up->len, NBITS, line));
  241                         z++;
  242                         w = 0;
  243                         bit_count(ub->map, 0, up->len, &w);
  244                         y += w;
  245                 } else if (up->ptr != NULL)
  246                         y += up->len;
  247         }
  248         KASSERT (y == uh->busy,
  249             ("UNR inconsistency: items %u found %u (line %d)\n",
  250             uh->busy, y, line));
  251         KASSERT (z == uh->alloc,
  252             ("UNR inconsistency: chunks %u found %u (line %d)\n",
  253             uh->alloc, z, line));
  254 }
  255 
  256 #else
  257 
  258 static __inline void
  259 check_unrhdr(struct unrhdr *uh __unused, int line __unused)
  260 {
  261 
  262 }
  263 
  264 #endif
  265 
  266 
   267 /*
   268  * Chunk memory management: new_unr() and delete_unr() recycle
   269  * pre-allocated blocks and track how many are in use for check_unrhdr().
   270  */
  271 
  272 static __inline void *
  273 new_unr(struct unrhdr *uh, void **p1, void **p2)
  274 {
  275         void *p;
  276 
  277         uh->alloc++;
  278         KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
  279         if (*p1 != NULL) {
  280                 p = *p1;
  281                 *p1 = NULL;
  282                 return (p);
  283         } else {
  284                 p = *p2;
  285                 *p2 = NULL;
  286                 return (p);
  287         }
  288 }
  289 
  290 static __inline void
  291 delete_unr(struct unrhdr *uh, void *ptr)
  292 {
  293         struct unr *up;
  294 
  295         uh->alloc--;
  296         up = ptr;
  297         TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
  298 }
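
/*
 * Memory discipline used by the callers below: paths that may need new
 * chunks while holding the mutex (free_unr(), alloc_unr_specific())
 * pre-allocate two blocks with the lock dropped and hand them in via p1/p2,
 * so new_unr() never has to allocate.  delete_unr() likewise does not free
 * directly; it queues the block on uh->ppfree for clean_unrhdrl() or
 * clean_unrhdr() to free once the mutex can be dropped.  A caller therefore
 * looks roughly like this (a sketch mirroring free_unr() below):
 *
 *	p1 = Malloc(sizeof(struct unr));	(lock not held)
 *	p2 = Malloc(sizeof(struct unr));
 *	mtx_lock(uh->mtx);
 *	... new_unr(uh, &p1, &p2) as needed ...
 *	mtx_unlock(uh->mtx);
 *	if (p1 != NULL) Free(p1);		(return unused spares)
 *	if (p2 != NULL) Free(p2);
 */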
  299 
  300 void
  301 clean_unrhdrl(struct unrhdr *uh)
  302 {
  303         struct unr *up;
  304 
  305         mtx_assert(uh->mtx, MA_OWNED);
  306         while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
  307                 TAILQ_REMOVE(&uh->ppfree, up, list);
  308                 mtx_unlock(uh->mtx);
  309                 Free(up);
  310                 mtx_lock(uh->mtx);
  311         }
  312 
  313 }
  314 
  315 void
  316 clean_unrhdr(struct unrhdr *uh)
  317 {
  318 
  319         mtx_lock(uh->mtx);
  320         clean_unrhdrl(uh);
  321         mtx_unlock(uh->mtx);
  322 }
  323 
  324 void
  325 init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
  326 {
  327 
  328         KASSERT(low >= 0 && low <= high,
  329             ("UNR: use error: new_unrhdr(%d, %d)", low, high));
  330         if (mutex != NULL)
  331                 uh->mtx = mutex;
  332         else
  333                 uh->mtx = &unitmtx;
  334         TAILQ_INIT(&uh->head);
  335         TAILQ_INIT(&uh->ppfree);
  336         uh->low = low;
  337         uh->high = high;
  338         uh->first = 0;
  339         uh->last = 1 + (high - low);
  340         check_unrhdr(uh, __LINE__);
  341 }
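
/*
 * Note on the fast-path invariant: a freshly initialized space has an empty
 * chunk list, uh->first == 0 and uh->last == high - low + 1.  As long as
 * units are handed out lowest-first and freed in reverse order, the list
 * stays empty and alloc_unrl()/free_unrl() just move these two counters; the
 * only memory in use is the struct unrhdr itself (the "single unbroken run"
 * case quoted at the top of the file).
 */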
  342 
  343 /*
  344  * Allocate a new unrheader set.
  345  *
  346  * Highest and lowest valid values given as parameters.
  347  */
  348 
  349 struct unrhdr *
  350 new_unrhdr(int low, int high, struct mtx *mutex)
  351 {
  352         struct unrhdr *uh;
  353 
  354         uh = Malloc(sizeof *uh);
  355         init_unrhdr(uh, low, high, mutex);
  356         return (uh);
  357 }
  358 
  359 void
  360 delete_unrhdr(struct unrhdr *uh)
  361 {
  362 
  363         check_unrhdr(uh, __LINE__);
  364         KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
  365         KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
  366         KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
  367             ("unrhdr has postponed item for free"));
  368         Free(uh);
  369 }
  370 
  371 void
  372 clear_unrhdr(struct unrhdr *uh)
  373 {
  374         struct unr *up, *uq;
  375 
  376         KASSERT(TAILQ_EMPTY(&uh->ppfree),
  377             ("unrhdr has postponed item for free"));
  378         TAILQ_FOREACH_SAFE(up, &uh->head, list, uq) {
  379                 if (up->ptr != uh) {
  380                         Free(up->ptr);
  381                 }
  382                 Free(up);
  383         }
  384         uh->busy = 0;
  385         uh->alloc = 0;
  386         init_unrhdr(uh, uh->low, uh->high, uh->mtx);
  387 
  388         check_unrhdr(uh, __LINE__);
  389 }
  390 
  391 static __inline int
  392 is_bitmap(struct unrhdr *uh, struct unr *up)
  393 {
  394         return (up->ptr != uh && up->ptr != NULL);
  395 }
  396 
  397 /*
   398  * Look for a sequence of items which can be combined into a bitmap; if
   399  * multiple are present, take the one which saves the most memory.
   400  *
   401  * Return (1) if a sequence was found, to indicate that another call
   402  * might be able to do more.  Return (0) if we found no suitable sequence.
  403  *
  404  * NB: called from alloc_unr(), no new memory allocation allowed.
  405  */
  406 static int
  407 optimize_unr(struct unrhdr *uh)
  408 {
  409         struct unr *up, *uf, *us;
  410         struct unrb *ub, *ubf;
  411         u_int a, l, ba;
  412 
  413         /*
  414          * Look for the run of items (if any) which when collapsed into
  415          * a bitmap would save most memory.
  416          */
  417         us = NULL;
  418         ba = 0;
  419         TAILQ_FOREACH(uf, &uh->head, list) {
  420                 if (uf->len >= NBITS)
  421                         continue;
  422                 a = 1;
  423                 if (is_bitmap(uh, uf))
  424                         a++;
  425                 l = uf->len;
  426                 up = uf;
  427                 while (1) {
  428                         up = TAILQ_NEXT(up, list);
  429                         if (up == NULL)
  430                                 break;
  431                         if ((up->len + l) > NBITS)
  432                                 break;
  433                         a++;
  434                         if (is_bitmap(uh, up))
  435                                 a++;
  436                         l += up->len;
  437                 }
  438                 if (a > ba) {
  439                         ba = a;
  440                         us = uf;
  441                 }
  442         }
  443         if (ba < 3)
  444                 return (0);
  445 
  446         /*
  447          * If the first element is not a bitmap, make it one.
  448          * Trying to do so without allocating more memory complicates things
   449  * a bit.
  450          */
  451         if (!is_bitmap(uh, us)) {
  452                 uf = TAILQ_NEXT(us, list);
  453                 TAILQ_REMOVE(&uh->head, us, list);
  454                 a = us->len;
  455                 l = us->ptr == uh ? 1 : 0;
  456                 ub = (void *)us;
  457                 bit_nclear(ub->map, 0, NBITS - 1);
  458                 if (l)
  459                         bit_nset(ub->map, 0, a);
  460                 if (!is_bitmap(uh, uf)) {
  461                         if (uf->ptr == NULL)
  462                                 bit_nclear(ub->map, a, a + uf->len - 1);
  463                         else
  464                                 bit_nset(ub->map, a, a + uf->len - 1);
  465                         uf->ptr = ub;
  466                         uf->len += a;
  467                         us = uf;
  468                 } else {
  469                         ubf = uf->ptr;
  470                         for (l = 0; l < uf->len; l++, a++) {
  471                                 if (bit_test(ubf->map, l))
  472                                         bit_set(ub->map, a);
  473                                 else
  474                                         bit_clear(ub->map, a);
  475                         }
  476                         uf->len = a;
  477                         delete_unr(uh, uf->ptr);
  478                         uf->ptr = ub;
  479                         us = uf;
  480                 }
  481         }
  482         ub = us->ptr;
  483         while (1) {
  484                 uf = TAILQ_NEXT(us, list);
  485                 if (uf == NULL)
  486                         return (1);
  487                 if (uf->len + us->len > NBITS)
  488                         return (1);
  489                 if (uf->ptr == NULL) {
  490                         bit_nclear(ub->map, us->len, us->len + uf->len - 1);
  491                         us->len += uf->len;
  492                         TAILQ_REMOVE(&uh->head, uf, list);
  493                         delete_unr(uh, uf);
  494                 } else if (uf->ptr == uh) {
  495                         bit_nset(ub->map, us->len, us->len + uf->len - 1);
  496                         us->len += uf->len;
  497                         TAILQ_REMOVE(&uh->head, uf, list);
  498                         delete_unr(uh, uf);
  499                 } else {
  500                         ubf = uf->ptr;
  501                         for (l = 0; l < uf->len; l++, us->len++) {
  502                                 if (bit_test(ubf->map, l))
  503                                         bit_set(ub->map, us->len);
  504                                 else
  505                                         bit_clear(ub->map, us->len);
  506                         }
  507                         TAILQ_REMOVE(&uh->head, uf, list);
  508                         delete_unr(uh, ubf);
  509                         delete_unr(uh, uf);
  510                 }
  511         }
  512 }
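
/*
 * Worked example of the heuristic above (hedged; the numbers are made up):
 * suppose three adjacent chunks describe 4 allocated, 1 free and 9 allocated
 * units.  They occupy three struct unr, their total length 4 + 1 + 9 = 14
 * fits in NBITS, and the counter "a" reaches 3, so optimize_unr() folds them
 * into a single bitmap chunk: one struct unr whose map (itself stored in an
 * unr-sized block) has bits 0..3 and 5..13 set.  Two blocks instead of
 * three, saving one allocation.
 */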
  513 
  514 /*
  515  * See if a given unr should be collapsed with a neighbor.
  516  *
  517  * NB: called from alloc_unr(), no new memory allocation allowed.
  518  */
  519 static void
  520 collapse_unr(struct unrhdr *uh, struct unr *up)
  521 {
  522         struct unr *upp;
  523         struct unrb *ub;
  524 
  525         /* If bitmap is all set or clear, change it to runlength */
  526         if (is_bitmap(uh, up)) {
  527                 ub = up->ptr;
  528                 if (ub_full(ub, up->len)) {
  529                         delete_unr(uh, up->ptr);
  530                         up->ptr = uh;
  531                 } else if (ub_empty(ub, up->len)) {
  532                         delete_unr(uh, up->ptr);
  533                         up->ptr = NULL;
  534                 }
  535         }
  536 
  537         /* If nothing left in runlength, delete it */
  538         if (up->len == 0) {
  539                 upp = TAILQ_PREV(up, unrhd, list);
  540                 if (upp == NULL)
  541                         upp = TAILQ_NEXT(up, list);
  542                 TAILQ_REMOVE(&uh->head, up, list);
  543                 delete_unr(uh, up);
  544                 up = upp;
  545         }
  546 
   547         /* If we still have a "hot-spot", merge with neighbors if possible */
  548         if (up != NULL) {
  549                 upp = TAILQ_PREV(up, unrhd, list);
  550                 if (upp != NULL && up->ptr == upp->ptr) {
  551                         up->len += upp->len;
  552                         TAILQ_REMOVE(&uh->head, upp, list);
  553                         delete_unr(uh, upp);
   554                 }
  555                 upp = TAILQ_NEXT(up, list);
  556                 if (upp != NULL && up->ptr == upp->ptr) {
  557                         up->len += upp->len;
  558                         TAILQ_REMOVE(&uh->head, upp, list);
  559                         delete_unr(uh, upp);
  560                 }
  561         }
  562 
  563         /* Merge into ->first if possible */
  564         upp = TAILQ_FIRST(&uh->head);
  565         if (upp != NULL && upp->ptr == uh) {
  566                 uh->first += upp->len;
  567                 TAILQ_REMOVE(&uh->head, upp, list);
  568                 delete_unr(uh, upp);
  569                 if (up == upp)
  570                         up = NULL;
  571         }
  572 
  573         /* Merge into ->last if possible */
  574         upp = TAILQ_LAST(&uh->head, unrhd);
  575         if (upp != NULL && upp->ptr == NULL) {
  576                 uh->last += upp->len;
  577                 TAILQ_REMOVE(&uh->head, upp, list);
  578                 delete_unr(uh, upp);
  579                 if (up == upp)
  580                         up = NULL;
  581         }
  582 
  583         /* Try to make bitmaps */
  584         while (optimize_unr(uh))
  585                 continue;
  586 }
  587 
  588 /*
  589  * Allocate a free unr.
  590  */
  591 int
  592 alloc_unrl(struct unrhdr *uh)
  593 {
  594         struct unr *up;
  595         struct unrb *ub;
  596         u_int x;
  597         int y;
  598 
  599         mtx_assert(uh->mtx, MA_OWNED);
  600         check_unrhdr(uh, __LINE__);
  601         x = uh->low + uh->first;
  602 
  603         up = TAILQ_FIRST(&uh->head);
  604 
  605         /*
  606          * If we have an ideal split, just adjust the first+last
  607          */
  608         if (up == NULL && uh->last > 0) {
  609                 uh->first++;
  610                 uh->last--;
  611                 uh->busy++;
  612                 return (x);
  613         }
  614 
  615         /*
  616          * We can always allocate from the first list element, so if we have
  617          * nothing on the list, we must have run out of unit numbers.
  618          */
  619         if (up == NULL)
  620                 return (-1);
  621 
  622         KASSERT(up->ptr != uh, ("UNR first element is allocated"));
  623 
  624         if (up->ptr == NULL) {  /* free run */
  625                 uh->first++;
  626                 up->len--;
  627         } else {                /* bitmap */
  628                 ub = up->ptr;
  629                 bit_ffc(ub->map, up->len, &y);
  630                 KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
  631                 bit_set(ub->map, y);
  632                 x += y;
  633         }
  634         uh->busy++;
  635         collapse_unr(uh, up);
  636         return (x);
  637 }
  638 
  639 int
  640 alloc_unr(struct unrhdr *uh)
  641 {
  642         int i;
  643 
  644         mtx_lock(uh->mtx);
  645         i = alloc_unrl(uh);
  646         clean_unrhdrl(uh);
  647         mtx_unlock(uh->mtx);
  648         return (i);
  649 }
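
/*
 * Locking sketch (illustrative; foo_* is hypothetical): a subsystem that
 * passes its own mutex to new_unrhdr() can allocate while already holding
 * that lock, mirroring what alloc_unr() does above with uh->mtx:
 *
 *	foo_units = new_unrhdr(0, 4095, &foo_mtx);
 *	...
 *	mtx_lock(&foo_mtx);
 *	unit = alloc_unrl(foo_units);	(the lock is not dropped)
 *	clean_unrhdrl(foo_units);	(may bounce the lock to free chunks)
 *	mtx_unlock(&foo_mtx);
 */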
  650 
  651 static int
  652 alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
  653 {
  654         struct unr *up, *upn;
  655         struct unrb *ub;
  656         u_int i, last, tl;
  657 
  658         mtx_assert(uh->mtx, MA_OWNED);
  659 
  660         if (item < uh->low + uh->first || item > uh->high)
  661                 return (-1);
  662 
  663         up = TAILQ_FIRST(&uh->head);
  664         /* Ideal split. */
  665         if (up == NULL && item - uh->low == uh->first) {
  666                 uh->first++;
  667                 uh->last--;
  668                 uh->busy++;
  669                 check_unrhdr(uh, __LINE__);
  670                 return (item);
  671         }
  672 
  673         i = item - uh->low - uh->first;
  674 
  675         if (up == NULL) {
  676                 up = new_unr(uh, p1, p2);
  677                 up->ptr = NULL;
  678                 up->len = i;
  679                 TAILQ_INSERT_TAIL(&uh->head, up, list);
  680                 up = new_unr(uh, p1, p2);
  681                 up->ptr = uh;
  682                 up->len = 1;
  683                 TAILQ_INSERT_TAIL(&uh->head, up, list);
  684                 uh->last = uh->high - uh->low - i;
  685                 uh->busy++;
  686                 check_unrhdr(uh, __LINE__);
  687                 return (item);
  688         } else {
  689                 /* Find the item which contains the unit we want to allocate. */
  690                 TAILQ_FOREACH(up, &uh->head, list) {
  691                         if (up->len > i)
  692                                 break;
  693                         i -= up->len;
  694                 }
  695         }
  696 
  697         if (up == NULL) {
  698                 if (i > 0) {
  699                         up = new_unr(uh, p1, p2);
  700                         up->ptr = NULL;
  701                         up->len = i;
  702                         TAILQ_INSERT_TAIL(&uh->head, up, list);
  703                 }
  704                 up = new_unr(uh, p1, p2);
  705                 up->ptr = uh;
  706                 up->len = 1;
  707                 TAILQ_INSERT_TAIL(&uh->head, up, list);
  708                 goto done;
  709         }
  710 
  711         if (is_bitmap(uh, up)) {
  712                 ub = up->ptr;
  713                 if (bit_test(ub->map, i) == 0) {
  714                         bit_set(ub->map, i);
  715                         goto done;
  716                 } else
  717                         return (-1);
  718         } else if (up->ptr == uh)
  719                 return (-1);
  720 
  721         KASSERT(up->ptr == NULL,
  722             ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));
  723 
  724         /* Split off the tail end, if any. */
  725         tl = up->len - (1 + i);
  726         if (tl > 0) {
  727                 upn = new_unr(uh, p1, p2);
  728                 upn->ptr = NULL;
  729                 upn->len = tl;
  730                 TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
  731         }
  732 
  733         /* Split off head end, if any */
  734         if (i > 0) {
  735                 upn = new_unr(uh, p1, p2);
  736                 upn->len = i;
  737                 upn->ptr = NULL;
  738                 TAILQ_INSERT_BEFORE(up, upn, list);
  739         }
  740         up->len = 1;
  741         up->ptr = uh;
  742 
  743 done:
  744         last = uh->high - uh->low - (item - uh->low);
  745         if (uh->last > last)
  746                 uh->last = last;
  747         uh->busy++;
  748         collapse_unr(uh, up);
  749         check_unrhdr(uh, __LINE__);
  750         return (item);
  751 }
  752 
  753 int
  754 alloc_unr_specific(struct unrhdr *uh, u_int item)
  755 {
  756         void *p1, *p2;
  757         int i;
  758 
  759         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");
  760 
  761         p1 = Malloc(sizeof(struct unr));
  762         p2 = Malloc(sizeof(struct unr));
  763 
  764         mtx_lock(uh->mtx);
  765         i = alloc_unr_specificl(uh, item, &p1, &p2);
  766         mtx_unlock(uh->mtx);
  767 
  768         if (p1 != NULL)
  769                 Free(p1);
  770         if (p2 != NULL)
  771                 Free(p2);
  772 
  773         return (i);
  774 }
  775 
  776 /*
  777  * Free a unr.
  778  *
  779  * If we can save unrs by using a bitmap, do so.
  780  */
  781 static void
  782 free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
  783 {
  784         struct unr *up, *upp, *upn;
  785         struct unrb *ub;
  786         u_int pl;
  787 
  788         KASSERT(item >= uh->low && item <= uh->high,
  789             ("UNR: free_unr(%u) out of range [%u...%u]",
  790              item, uh->low, uh->high));
  791         check_unrhdr(uh, __LINE__);
  792         item -= uh->low;
  793         upp = TAILQ_FIRST(&uh->head);
  794         /*
  795          * Freeing in the ideal split case
  796          */
  797         if (item + 1 == uh->first && upp == NULL) {
  798                 uh->last++;
  799                 uh->first--;
  800                 uh->busy--;
  801                 check_unrhdr(uh, __LINE__);
  802                 return;
  803         }
  804         /*
  805          * Freeing in the ->first section.  Create a run starting at the
  806          * freed item.  The code below will subdivide it.
  807          */
  808         if (item < uh->first) {
  809                 up = new_unr(uh, p1, p2);
  810                 up->ptr = uh;
  811                 up->len = uh->first - item;
  812                 TAILQ_INSERT_HEAD(&uh->head, up, list);
  813                 uh->first -= up->len;
  814         }
  815 
  816         item -= uh->first;
  817 
  818         /* Find the item which contains the unit we want to free */
  819         TAILQ_FOREACH(up, &uh->head, list) {
  820                 if (up->len > item)
  821                         break;
  822                 item -= up->len;
  823         }
  824 
  825         /* Handle bitmap items */
  826         if (is_bitmap(uh, up)) {
  827                 ub = up->ptr;
  828 
  829                 KASSERT(bit_test(ub->map, item) != 0,
  830                     ("UNR: Freeing free item %d (bitmap)\n", item));
  831                 bit_clear(ub->map, item);
  832                 uh->busy--;
  833                 collapse_unr(uh, up);
  834                 return;
  835         }
  836 
   837         KASSERT(up->ptr == uh, ("UNR: Freeing free item %d (run)\n", item));
  838 
  839         /* Just this one left, reap it */
  840         if (up->len == 1) {
  841                 up->ptr = NULL;
  842                 uh->busy--;
  843                 collapse_unr(uh, up);
  844                 return;
  845         }
  846 
  847         /* Check if we can shift the item into the previous 'free' run */
  848         upp = TAILQ_PREV(up, unrhd, list);
  849         if (item == 0 && upp != NULL && upp->ptr == NULL) {
  850                 upp->len++;
  851                 up->len--;
  852                 uh->busy--;
  853                 collapse_unr(uh, up);
  854                 return;
  855         }
  856 
  857         /* Check if we can shift the item to the next 'free' run */
  858         upn = TAILQ_NEXT(up, list);
  859         if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
  860                 upn->len++;
  861                 up->len--;
  862                 uh->busy--;
  863                 collapse_unr(uh, up);
  864                 return;
  865         }
  866 
  867         /* Split off the tail end, if any. */
  868         pl = up->len - (1 + item);
  869         if (pl > 0) {
  870                 upp = new_unr(uh, p1, p2);
  871                 upp->ptr = uh;
  872                 upp->len = pl;
  873                 TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
  874         }
  875 
  876         /* Split off head end, if any */
  877         if (item > 0) {
  878                 upp = new_unr(uh, p1, p2);
  879                 upp->len = item;
  880                 upp->ptr = uh;
  881                 TAILQ_INSERT_BEFORE(up, upp, list);
  882         }
  883         up->len = 1;
  884         up->ptr = NULL;
  885         uh->busy--;
  886         collapse_unr(uh, up);
  887 }
  888 
  889 void
  890 free_unr(struct unrhdr *uh, u_int item)
  891 {
  892         void *p1, *p2;
  893 
  894         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
  895         p1 = Malloc(sizeof(struct unr));
  896         p2 = Malloc(sizeof(struct unr));
  897         mtx_lock(uh->mtx);
  898         free_unrl(uh, item, &p1, &p2);
  899         clean_unrhdrl(uh);
  900         mtx_unlock(uh->mtx);
  901         if (p1 != NULL)
  902                 Free(p1);
  903         if (p2 != NULL)
  904                 Free(p2);
  905 }
  906 
  907 #ifndef _KERNEL /* USERLAND test driver */
  908 
  909 /*
  910  * Simple stochastic test driver for the above functions.  The code resides
  911  * here so that it can access static functions and structures.
  912  */
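
/*
 * Building the test program is not wired into any Makefile; on a FreeBSD
 * host the file should compile stand-alone (a sketch, command untested):
 *
 *	cc -O2 -o unrtest subr_unit.c
 *	./unrtest -r 100 -v
 */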
  913 
  914 static bool verbose;
  915 #define VPRINTF(...)    {if (verbose) printf(__VA_ARGS__);}
  916 
  917 static void
  918 print_unr(struct unrhdr *uh, struct unr *up)
  919 {
  920         u_int x;
  921         struct unrb *ub;
  922 
  923         printf("  %p len = %5u ", up, up->len);
  924         if (up->ptr == NULL)
  925                 printf("free\n");
  926         else if (up->ptr == uh)
  927                 printf("alloc\n");
  928         else {
  929                 ub = up->ptr;
  930                 printf("bitmap [");
  931                 for (x = 0; x < up->len; x++) {
  932                         if (bit_test(ub->map, x))
  933                                 printf("#");
  934                         else
  935                                 printf(" ");
  936                 }
  937                 printf("]\n");
  938         }
  939 }
  940 
  941 static void
  942 print_unrhdr(struct unrhdr *uh)
  943 {
  944         struct unr *up;
  945         u_int x;
  946 
  947         printf(
  948             "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
  949             uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
  950         x = uh->low + uh->first;
  951         TAILQ_FOREACH(up, &uh->head, list) {
  952                 printf("  from = %5u", x);
  953                 print_unr(uh, up);
  954                 if (up->ptr == NULL || up->ptr == uh)
  955                         x += up->len;
  956                 else
  957                         x += NBITS;
  958         }
  959 }
  960 
  961 static void
  962 test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
  963 {
  964         int j;
  965 
  966         if (a[i]) {
  967                 VPRINTF("F %u\n", i);
  968                 free_unr(uh, i);
  969                 a[i] = 0;
  970         } else {
  971                 no_alloc = 1;
  972                 j = alloc_unr(uh);
  973                 if (j != -1) {
  974                         a[j] = 1;
  975                         VPRINTF("A %d\n", j);
  976                 }
  977                 no_alloc = 0;
  978         }
  979 }
  980 
  981 static void
  982 test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
  983 {
  984         int j;
  985 
  986         j = alloc_unr_specific(uh, i);
  987         if (j == -1) {
  988                 VPRINTF("F %u\n", i);
  989                 a[i] = 0;
  990                 free_unr(uh, i);
  991         } else {
  992                 a[i] = 1;
  993                 VPRINTF("A %d\n", j);
  994         }
  995 }
  996 
  997 static void
  998 usage(char** argv)
  999 {
 1000         printf("%s [-h] [-r REPETITIONS] [-v]\n", argv[0]);
 1001 }
 1002 
 1003 int
 1004 main(int argc, char **argv)
 1005 {
 1006         struct unrhdr *uh;
 1007         char *a;
 1008         long count = 10000;     /* Number of unrs to test */
 1009         long reps = 1, m;
 1010         int ch;
 1011         u_int i, j;
 1012 
 1013         verbose = false;
 1014 
 1015         while ((ch = getopt(argc, argv, "hr:v")) != -1) {
 1016                 switch (ch) {
 1017                 case 'r':
 1018                         errno = 0;
 1019                         reps = strtol(optarg, NULL, 0);
 1020                         if (errno == ERANGE || errno == EINVAL) {
 1021                                 usage(argv);
 1022                                 exit(2);
 1023                         }
 1024 
 1025                         break;
 1026                 case 'v':
 1027                         verbose = true;
 1028                         break;
 1029                 case 'h':
 1030                 default:
 1031                         usage(argv);
 1032                         exit(2);
 1033                 }
 1034 
 1035 
 1036         }
 1037 
 1038         setbuf(stdout, NULL);
 1039         uh = new_unrhdr(0, count - 1, NULL);
 1040         print_unrhdr(uh);
 1041 
 1042         a = calloc(count, sizeof(char));
 1043         if (a == NULL)
 1044                 err(1, "calloc failed");
 1045         srandomdev();
 1046 
 1047         printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
 1048         printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
 1049         printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
 1050         printf("NBITS %lu\n", (unsigned long)NBITS);
 1051         for (m = 0; m < count * reps; m++) {
 1052                 j = random();
 1053                 i = (j >> 1) % count;
 1054 #if 0
 1055                 if (a[i] && (j & 1))
 1056                         continue;
 1057 #endif
 1058                 if ((random() & 1) != 0)
 1059                         test_alloc_unr(uh, i, a);
 1060                 else
 1061                         test_alloc_unr_specific(uh, i, a);
 1062 
 1063                 if (verbose)
 1064                         print_unrhdr(uh);
 1065                 check_unrhdr(uh, __LINE__);
 1066         }
 1067         for (i = 0; i < (u_int)count; i++) {
 1068                 if (a[i]) {
 1069                         if (verbose) {
 1070                                 printf("C %u\n", i);
 1071                                 print_unrhdr(uh);
 1072                         }
 1073                         free_unr(uh, i);
 1074                 }
 1075         }
 1076         print_unrhdr(uh);
 1077         delete_unrhdr(uh);
 1078         free(a);
 1079         return (0);
 1080 }
 1081 #endif
