FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_extent.c


/*      $NetBSD: subr_extent.c,v 1.51 2005/03/15 18:22:24 bouyer Exp $  */

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.51 2005/03/15 18:22:24 bouyer Exp $");

#ifdef _KERNEL
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>

#include <uvm/uvm_extern.h>

#define KMEM_IS_RUNNING         (kmem_map != NULL)
#elif defined(_EXTENT_TESTING)
/*
 * User-land definitions, so this code can be built in a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define \
malloc(s, t, flags)             malloc(s)
#define \
free(p, t)                      free(p)
#define \
tsleep(chan, pri, str, timo)    (EWOULDBLOCK)
#define \
ltsleep(chan,pri,str,timo,lck)  (EWOULDBLOCK)
#define \
wakeup(chan)                    ((void)0)
#define \
pool_get(pool, flags)           malloc((pool)->pr_size,0,0)
#define \
pool_put(pool, rp)              free(rp,0)
#define \
panic(a)                        printf(a)
#define \
splhigh()                       (1)
#define \
splx(s)                         ((void)(s))

#define \
simple_lock_init(l)             ((void)(l))
#define \
simple_lock(l)                  ((void)(l))
#define \
simple_unlock(l)                ((void)(l))
#define KMEM_IS_RUNNING                 (1)
#endif

static  void extent_insert_and_optimize(struct extent *, u_long, u_long,
            int, struct extent_region *, struct extent_region *);
static  struct extent_region *extent_alloc_region_descriptor
           (struct extent *, int);
static  void extent_free_region_descriptor(struct extent *,
            struct extent_region *);

static struct pool expool;
static struct simplelock expool_init_slock = SIMPLELOCK_INITIALIZER;
static int expool_initialized;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)             \
        (((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
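
/*
 * Worked example: EXTENT_ALIGN(0x1234, 0x100, 0) computes
 * ((0x1234 + 0xff) & ~0xff) = 0x1300.  A non-zero skew shifts the
 * alignment lattice: EXTENT_ALIGN(0x1234, 0x100, 0x20) rounds up to
 * the next address of the form N * 0x100 + 0x20, i.e. 0x1320.
 */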

/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc()).
 */

static __inline void
expool_init(void)
{

        simple_lock(&expool_init_slock);
        if (expool_initialized) {
                simple_unlock(&expool_init_slock);
                return;
        }

#if defined(_KERNEL)
        pool_init(&expool, sizeof(struct extent_region), 0, 0, 0,
            "extent", NULL);
#else
        expool.pr_size = sizeof(struct extent_region);
#endif

        expool_initialized = 1;
        simple_unlock(&expool_init_slock);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
        const char *name;
        u_long start, end;
        struct malloc_type *mtype;
        caddr_t storage;
        size_t storagesize;
        int flags;
{
        struct extent *ex;
        caddr_t cp = storage;
        size_t sz = storagesize;
        struct extent_region *rp;
        int fixed_extent = (storage != NULL);
        int s;

#ifdef DIAGNOSTIC
        /* Check arguments. */
        if (name == NULL)
                panic("extent_create: name == NULL");
        if (end < start) {
                printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
                    name, start, end);
                panic("extent_create: end < start");
        }
        if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
                panic("extent_create: fixed extent, bad storagesize 0x%lx",
                    (u_long)storagesize);
        if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
                panic("extent_create: storage provided for non-fixed");
#endif

        /* Allocate extent descriptor. */
        if (fixed_extent) {
                struct extent_fixed *fex;

                memset(storage, 0, storagesize);

                /*
                 * Align all descriptors on "long" boundaries.
                 */
                fex = (struct extent_fixed *)cp;
                ex = (struct extent *)fex;
                cp += ALIGN(sizeof(struct extent_fixed));
                sz -= ALIGN(sizeof(struct extent_fixed));
                fex->fex_storage = storage;
                fex->fex_storagesize = storagesize;

                /*
                 * In a fixed extent, we have to pre-allocate region
                 * descriptors and place them in the extent's freelist.
                 */
                LIST_INIT(&fex->fex_freelist);
                while (sz >= ALIGN(sizeof(struct extent_region))) {
                        rp = (struct extent_region *)cp;
                        cp += ALIGN(sizeof(struct extent_region));
                        sz -= ALIGN(sizeof(struct extent_region));
                        LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
                }
        } else {
                s = splhigh();
                if (expool_initialized == 0)
                        expool_init();
                splx(s);

                ex = (struct extent *)malloc(sizeof(struct extent),
                    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
                if (ex == NULL)
                        return (NULL);
        }

        /* Fill in the extent descriptor and return it to the caller. */
        simple_lock_init(&ex->ex_slock);
        LIST_INIT(&ex->ex_regions);
        ex->ex_name = name;
        ex->ex_start = start;
        ex->ex_end = end;
        ex->ex_mtype = mtype;
        ex->ex_flags = 0;
        if (fixed_extent)
                ex->ex_flags |= EXF_FIXED;
        if (flags & EX_NOCOALESCE)
                ex->ex_flags |= EXF_NOCOALESCE;
        return (ex);
}
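
/*
 * Example usage (an illustrative sketch; the extent names are
 * hypothetical, M_DEVBUF is the standard malloc(9) type, and
 * EXTENT_FIXED_STORAGE_SIZE() is declared in <sys/extent.h>):
 */
#if 0
/* Dynamic extent over a 64KB I/O port space; descriptors come from
 * the allocator as needed and may sleep (EX_WAITOK). */
struct extent *ioport_ex = extent_create("ioport", 0x0000, 0xffff,
    M_DEVBUF, NULL, 0, EX_WAITOK);

/* Fixed extent with statically pre-allocated storage for eight
 * region descriptors, usable before malloc() is running. */
static long iomem_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
struct extent *iomem_ex = extent_create("iomem", 0x00000000, 0xffffffff,
    M_DEVBUF, (caddr_t)iomem_storage, sizeof(iomem_storage), EX_NOWAIT);

/* When done, release all regions and the descriptor itself. */
extent_destroy(ioport_ex);
#endif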

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(ex)
        struct extent *ex;
{
        struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
        /* Check arguments. */
        if (ex == NULL)
                panic("extent_destroy: NULL extent");
#endif

        /* Free all region descriptors in extent. */
        for (rp = LIST_FIRST(&ex->ex_regions); rp != NULL; ) {
                orp = rp;
                rp = LIST_NEXT(rp, er_link);
                LIST_REMOVE(orp, er_link);
                extent_free_region_descriptor(ex, orp);
        }

        /* If we're not a fixed extent, free the extent descriptor itself. */
        if ((ex->ex_flags & EXF_FIXED) == 0)
                free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
        struct extent *ex;
        u_long start, size;
        int flags;
        struct extent_region *after, *rp;
{
        struct extent_region *nextr;
        int appended = 0;

        if (after == NULL) {
                /*
                 * We're the first in the region list.  If there's
                 * a region after us, attempt to coalesce to save
                 * descriptor overhead.
                 */
                if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
                    (LIST_FIRST(&ex->ex_regions) != NULL) &&
                    ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) {
                        /*
                         * We can coalesce.  Prepend us to the first region.
                         */
                        LIST_FIRST(&ex->ex_regions)->er_start = start;
                        extent_free_region_descriptor(ex, rp);
                        return;
                }
                /*
                 * Can't coalesce.  Fill in the region descriptor
                 * and insert us at the head of the region list.
                 */
                rp->er_start = start;
                rp->er_end = start + (size - 1);
                LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
                return;
        }

        /*
         * If EXF_NOCOALESCE is set, coalescing is disallowed.
         */
        if (ex->ex_flags & EXF_NOCOALESCE)
                goto cant_coalesce;

        /*
         * Attempt to coalesce with the region before us.
         */
        if ((after->er_end + 1) == start) {
                /*
                 * We can coalesce.  Append ourselves and make
                 * note of it.
                 */
                after->er_end = start + (size - 1);
                appended = 1;
        }

        /*
         * Attempt to coalesce with the region after us.
         */
        if ((LIST_NEXT(after, er_link) != NULL) &&
            ((start + size) == LIST_NEXT(after, er_link)->er_start)) {
                /*
                 * We can coalesce.  Note that if we appended ourselves
                 * to the previous region, we exactly fit the gap, and
                 * can free the "next" region descriptor.
                 */
                if (appended) {
                        /*
                         * Yup, we can free it up.
                         */
                        after->er_end = LIST_NEXT(after, er_link)->er_end;
                        nextr = LIST_NEXT(after, er_link);
                        LIST_REMOVE(nextr, er_link);
                        extent_free_region_descriptor(ex, nextr);
                } else {
                        /*
                         * Nope, just prepend us to the next region.
                         */
                        LIST_NEXT(after, er_link)->er_start = start;
                }

                extent_free_region_descriptor(ex, rp);
                return;
        }

        /*
         * We weren't able to coalesce with the next region, but
         * we don't need to allocate a region descriptor if we
         * appended ourselves to the previous region.
         */
        if (appended) {
                extent_free_region_descriptor(ex, rp);
                return;
        }

 cant_coalesce:

        /*
         * Fill in the region descriptor and insert ourselves
         * into the region list.
         */
        rp->er_start = start;
        rp->er_end = start + (size - 1);
        LIST_INSERT_AFTER(after, rp, er_link);
}
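
/*
 * For example, if the list holds a single region [0x100, 0x1ff] and
 * [0x200, 0x2ff] is inserted after it, the two coalesce into one
 * descriptor [0x100, 0x2ff] and "rp" is freed; with EXF_NOCOALESCE
 * set, both descriptors remain on the list.
 */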

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
        struct extent *ex;
        u_long start, size;
        int flags;
{
        struct extent_region *rp, *last, *myrp;
        u_long end = start + (size - 1);
        int error;

#ifdef DIAGNOSTIC
        /* Check arguments. */
        if (ex == NULL)
                panic("extent_alloc_region: NULL extent");
        if (size < 1) {
                printf("extent_alloc_region: extent `%s', size 0x%lx\n",
                    ex->ex_name, size);
                panic("extent_alloc_region: bad size");
        }
        if (end < start) {
                printf(
                 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
                 ex->ex_name, start, size);
                panic("extent_alloc_region: overflow");
        }
#endif
#ifdef LOCKDEBUG
        if (flags & EX_WAITSPACE)
                simple_lock_only_held(NULL,
                    "extent_alloc_region(EX_WAITSPACE)");
#endif

        /*
         * Make sure the requested region lies within the
         * extent.
         *
         * We don't lock to check the range, because those values
         * are never modified, and if another thread deletes the
         * extent, we're screwed anyway.
         */
        if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
                printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
                    ex->ex_name, ex->ex_start, ex->ex_end);
                printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
                    start, end);
                panic("extent_alloc_region: region lies outside extent");
#else
                return (EINVAL);
#endif
        }

        /*
         * Allocate the region descriptor.  It will be freed later
         * if we can coalesce with another region.  Don't lock before
         * here!  This could block.
         */
        myrp = extent_alloc_region_descriptor(ex, flags);
        if (myrp == NULL) {
#ifdef DIAGNOSTIC
                printf(
                    "extent_alloc_region: can't allocate region descriptor\n");
#endif
                return (ENOMEM);
        }

 alloc_start:
        simple_lock(&ex->ex_slock);

        /*
         * Attempt to place ourselves in the desired area of the
         * extent.  We save ourselves some work by keeping the list sorted.
         * In other words, if the start of the current region is greater
         * than the end of our region, we don't have to search any further.
         */

        /*
         * Keep a pointer to the last region we looked at so
         * that we don't have to traverse the list again when
         * we insert ourselves.  If "last" is NULL when we
         * finally insert ourselves, we go at the head of the
         * list.  See extent_insert_and_optimize() for details.
         */
        last = NULL;

        LIST_FOREACH(rp, &ex->ex_regions, er_link) {
                if (rp->er_start > end) {
                        /*
                         * We lie before this region and don't
                         * conflict.
                         */
                        break;
                }

                /*
                 * The current region begins before we end.
                 * Check for a conflict.
                 */
                if (rp->er_end >= start) {
                        /*
                         * We conflict.  If we can (and want to) wait,
                         * do so.
                         */
                        if (flags & EX_WAITSPACE) {
                                ex->ex_flags |= EXF_WANTED;
                                error = ltsleep(ex,
                                    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
                                    "extnt", 0, &ex->ex_slock);
                                if (error)
                                        return (error);
                                goto alloc_start;
                        }
                        extent_free_region_descriptor(ex, myrp);
                        simple_unlock(&ex->ex_slock);
                        return (EAGAIN);
                }
                /*
                 * We don't conflict, but this region lies before
                 * us.  Keep a pointer to this region, and keep
                 * trying.
                 */
                last = rp;
        }

        /*
         * We don't conflict with any regions.  "last" points
         * to the region we fall after, or is NULL if we belong
         * at the beginning of the region list.  Insert ourselves.
         */
        extent_insert_and_optimize(ex, start, size, flags, last, myrp);
        simple_unlock(&ex->ex_slock);
        return (0);
}
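
/*
 * Example (an illustrative sketch; the extent and range are
 * hypothetical):
 */
#if 0
/* Claim I/O ports 0x3f8-0x3ff.  Without EX_WAITSPACE the call
 * returns EAGAIN rather than sleeping when the range is busy. */
int error = extent_alloc_region(ioport_ex, 0x3f8, 8, EX_NOWAIT);
#endif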

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)  ((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
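
/*
 * For example, with a 32-bit u_long, LE_OV(0xfffffff0, 0x20, 0xffffffff)
 * is false: the sum wraps to 0x10, which fails the "(x + y) >= (x)"
 * clause, whereas a bare "(x + y) <= (z)" test would wrongly succeed.
 */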

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
        struct extent *ex;
        u_long substart, subend, size, alignment, skew, boundary;
        int flags;
        u_long *result;
{
        struct extent_region *rp, *myrp, *last, *bestlast;
        u_long newstart, newend, exend, beststart, bestovh, ovh;
        u_long dontcross;
        int error;

#ifdef DIAGNOSTIC
        /*
         * Check arguments.
         *
         * We don't lock to check these, because these values
         * are never modified, and if another thread deletes the
         * extent, we're screwed anyway.
         */
        if (ex == NULL)
                panic("extent_alloc_subregion: NULL extent");
        if (result == NULL)
                panic("extent_alloc_subregion: NULL result pointer");
        if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
            (subend > ex->ex_end) || (subend < ex->ex_start)) {
  printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
                    ex->ex_name, ex->ex_start, ex->ex_end);
                printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
                    substart, subend);
                panic("extent_alloc_subregion: bad subregion");
        }
        if ((size < 1) || ((size - 1) > (subend - substart))) {
                printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
                    ex->ex_name, size);
                panic("extent_alloc_subregion: bad size");
        }
        if (alignment == 0)
                panic("extent_alloc_subregion: bad alignment");
        if (boundary && (boundary < size)) {
                printf(
                    "extent_alloc_subregion: extent `%s', size 0x%lx, "
                    "boundary 0x%lx\n", ex->ex_name, size, boundary);
                panic("extent_alloc_subregion: bad boundary");
        }
#endif
#ifdef LOCKDEBUG
        if (flags & EX_WAITSPACE)
                simple_lock_only_held(NULL,
                    "extent_alloc_subregion1(EX_WAITSPACE)");
#endif

        /*
         * Allocate the region descriptor.  It will be freed later
         * if we can coalesce with another region.  Don't lock before
         * here!  This could block.
         */
        myrp = extent_alloc_region_descriptor(ex, flags);
        if (myrp == NULL) {
#ifdef DIAGNOSTIC
                printf(
                 "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
                return (ENOMEM);
        }

 alloc_start:
        simple_lock(&ex->ex_slock);

        /*
         * Keep a pointer to the last region we looked at so
         * that we don't have to traverse the list again when
         * we insert ourselves.  If "last" is NULL when we
         * finally insert ourselves, we go at the head of the
         * list.  See extent_insert_and_optimize() for details.
         */
        last = NULL;

        /*
         * Keep track of size and location of the smallest
         * chunk we fit in.
         *
         * Since the extent can be as large as the numeric range
         * of the CPU (0 - 0xffffffff for 32-bit systems), the
         * best overhead value can be the maximum unsigned integer.
         * Thus, we initialize "bestovh" to 0, since we insert ourselves
         * into the region list immediately on an exact match (which
         * is the only case where "bestovh" would be set to 0).
         */
        bestovh = 0;
        beststart = 0;
        bestlast = NULL;

        /*
         * Keep track of end of free region.  This is either the end of extent
         * or the start of a region past the subend.
         */
        exend = ex->ex_end;

        /*
         * For N allocated regions, we must make (N + 1)
         * checks for unallocated space.  The first chunk we
         * check is the area from the beginning of the subregion
         * to the first allocated region after that point.
         */
        newstart = EXTENT_ALIGN(substart, alignment, skew);
        if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
                printf(
      "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
                 ex->ex_name, ex->ex_start, ex->ex_end, alignment);
                simple_unlock(&ex->ex_slock);
                panic("extent_alloc_subregion: overflow after alignment");
#else
                extent_free_region_descriptor(ex, myrp);
                simple_unlock(&ex->ex_slock);
                return (EINVAL);
#endif
        }

        /*
         * Find the first allocated region that begins on or after
         * the subregion start, advancing the "last" pointer along
         * the way.
         */
        LIST_FOREACH(rp, &ex->ex_regions, er_link) {
                if (rp->er_start >= newstart)
                        break;
                last = rp;
        }

        /*
         * Relocate the start of our candidate region to the end of
         * the last allocated region (if there was one overlapping
         * our subrange).
         */
        if (last != NULL && last->er_end >= newstart)
                newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

        for (; rp != NULL; rp = LIST_NEXT(rp, er_link)) {
                /*
                 * If the region starts past the subend, bail out and see
                 * if we fit against the subend.
                 */
                if (rp->er_start > subend) {
                        exend = rp->er_start;
                        break;
                }

                /*
                 * Check the chunk before "rp".  Note that our
                 * comparison is safe from overflow conditions.
                 */
                if (LE_OV(newstart, size, rp->er_start)) {
                        /*
                         * Do a boundary check, if necessary.  Note
                         * that a region may *begin* on the boundary,
                         * but it must end before the boundary.
                         */
                        if (boundary) {
                                newend = newstart + (size - 1);

                                /*
                                 * Calculate the next boundary after the start
                                 * of this region.
                                 */
                                dontcross = EXTENT_ALIGN(newstart+1, boundary,
                                    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
                                    - 1;

#if 0
                                printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
                                    newstart, newend, ex->ex_start, ex->ex_end,
                                    boundary, dontcross);
#endif

                                /* Check for overflow */
                                if (dontcross < ex->ex_start)
                                        dontcross = ex->ex_end;
                                else if (newend > dontcross) {
                                        /*
                                         * Candidate region crosses boundary.
                                         * Throw away the leading part and see
                                         * if we still fit.
                                         */
                                        newstart = dontcross + 1;
                                        newend = newstart + (size - 1);
                                        dontcross += boundary;
                                        if (!LE_OV(newstart, size, rp->er_start))
                                                goto skip;
                                }

                                /*
                                 * If we run past the end of
                                 * the extent or the boundary
                                 * overflows, then the request
                                 * can't fit.
                                 */
                                if (newstart + size - 1 > ex->ex_end ||
                                    dontcross < newstart)
                                        goto fail;
                        }

                        /*
                         * We would fit into this space.  Calculate
                         * the overhead (wasted space).  If we exactly
                         * fit, or we're taking the first fit, insert
                         * ourselves into the region list.
                         */
                        ovh = rp->er_start - newstart - size;
                        if ((flags & EX_FAST) || (ovh == 0))
                                goto found;

                        /*
                         * Don't exactly fit, but check to see
                         * if we're better than any current choice.
                         */
                        if ((bestovh == 0) || (ovh < bestovh)) {
                                bestovh = ovh;
                                beststart = newstart;
                                bestlast = last;
                        }
                }

skip:
                /*
                 * Skip past the current region and check again.
                 */
                newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
                if (newstart < rp->er_end) {
                        /*
                         * Overflow condition.  Don't error out, since
                         * we might have a chunk of space that we can
                         * use.
                         */
                        goto fail;
                }

                last = rp;
        }

        /*
         * The final check is from the current starting point to the
         * end of the subregion.  If there were no allocated regions,
         * "newstart" is set to the beginning of the subregion, or
         * just past the end of the last allocated region, adjusted
         * for alignment in either case.
         */
        if (LE_OV(newstart, (size - 1), subend)) {
                /*
                 * Do a boundary check, if necessary.  Note
                 * that a region may *begin* on the boundary,
                 * but it must end before the boundary.
                 */
                if (boundary) {
                        newend = newstart + (size - 1);

                        /*
                         * Calculate the next boundary after the start
                         * of this region.
                         */
                        dontcross = EXTENT_ALIGN(newstart+1, boundary,
                            (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
                            - 1;

#if 0
                        printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
                            newstart, newend, ex->ex_start, ex->ex_end,
                            boundary, dontcross);
#endif

                        /* Check for overflow */
                        if (dontcross < ex->ex_start)
                                dontcross = ex->ex_end;
                        else if (newend > dontcross) {
                                /*
                                 * Candidate region crosses boundary.
                                 * Throw away the leading part and see
                                 * if we still fit.
                                 */
                                newstart = dontcross + 1;
                                newend = newstart + (size - 1);
                                dontcross += boundary;
                                if (!LE_OV(newstart, (size - 1), subend))
                                        goto fail;
                        }

                        /*
                         * If we run past the end of
                         * the extent or the boundary
                         * overflows, then the request
                         * can't fit.
                         */
                        if (newstart + size - 1 > ex->ex_end ||
                            dontcross < newstart)
                                goto fail;
                }

                /*
                 * We would fit into this space.  Calculate
                 * the overhead (wasted space).  If we exactly
                 * fit, or we're taking the first fit, insert
                 * ourselves into the region list.
                 */
                ovh = exend - newstart - (size - 1);
                if ((flags & EX_FAST) || (ovh == 0))
                        goto found;

                /*
                 * Don't exactly fit, but check to see
                 * if we're better than any current choice.
                 */
                if ((bestovh == 0) || (ovh < bestovh)) {
                        bestovh = ovh;
                        beststart = newstart;
                        bestlast = last;
                }
        }

 fail:
        /*
         * One of the following two conditions has occurred:
         *
         *      There is no chunk large enough to hold the request.
         *
         *      If EX_FAST was not specified, there is no exact
         *      match for the request.
         *
         * Note that if we reach this point and EX_FAST is
         * set, then we know there is no space in the extent for
         * the request.
         */
        if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
                /*
                 * We have a match that's "good enough".
                 */
                newstart = beststart;
                last = bestlast;
                goto found;
        }

        /*
         * No space currently available.  Wait for it to free up,
         * if possible.
         */
        if (flags & EX_WAITSPACE) {
                ex->ex_flags |= EXF_WANTED;
                error = ltsleep(ex,
                    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
                    "extnt", 0, &ex->ex_slock);
                if (error)
                        return (error);
                goto alloc_start;
        }

        extent_free_region_descriptor(ex, myrp);
        simple_unlock(&ex->ex_slock);
        return (EAGAIN);

 found:
        /*
         * Insert ourselves into the region list.
         */
        extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
        simple_unlock(&ex->ex_slock);
        *result = newstart;
        return (0);
}
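
/*
 * Example (an illustrative sketch; the extent is hypothetical, and
 * extent_alloc_subregion() in <sys/extent.h> is the usual wrapper
 * that passes a zero skew):
 */
#if 0
u_long dma_addr;
int error;

/* Find a free 0x1000-byte chunk in the low 16MB, 4-byte aligned
 * and not crossing a 64KB boundary (a classic ISA DMA constraint). */
error = extent_alloc_subregion1(iomem_ex, 0x000000, 0xffffff, 0x1000,
    0x4, 0, 0x10000, EX_FAST | EX_NOWAIT, &dma_addr);
#endif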

int
extent_free(ex, start, size, flags)
        struct extent *ex;
        u_long start, size;
        int flags;
{
        struct extent_region *rp, *nrp = NULL;
        u_long end = start + (size - 1);
        int exflags;

#ifdef DIAGNOSTIC
        /*
         * Check arguments.
         *
         * We don't lock to check these, because these values
         * are never modified, and if another thread deletes the
         * extent, we're screwed anyway.
         */
        if (ex == NULL)
                panic("extent_free: NULL extent");
        if ((start < ex->ex_start) || (end > ex->ex_end)) {
                extent_print(ex);
                printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
                    ex->ex_name, start, size);
                panic("extent_free: extent `%s', region not within extent",
                    ex->ex_name);
        }
        /* Check for an overflow. */
        if (end < start) {
                extent_print(ex);
                printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
                    ex->ex_name, start, size);
                panic("extent_free: overflow");
        }
#endif

        /*
         * If we're allowing coalescing, we must allocate a region
         * descriptor now, since it might block.
         *
         * XXX Make a static, create-time flags word, so we don't
         * XXX have to lock to read it!
         */
        simple_lock(&ex->ex_slock);
        exflags = ex->ex_flags;
        simple_unlock(&ex->ex_slock);

        if ((exflags & EXF_NOCOALESCE) == 0) {
                /* Allocate a region descriptor. */
                nrp = extent_alloc_region_descriptor(ex, flags);
                if (nrp == NULL)
                        return (ENOMEM);
        }

        simple_lock(&ex->ex_slock);

        /*
         * Find region and deallocate.  Several possibilities:
         *
         *      1. (start == er_start) && (end == er_end):
         *         Free descriptor.
         *
         *      2. (start == er_start) && (end < er_end):
         *         Adjust er_start.
         *
         *      3. (start > er_start) && (end == er_end):
         *         Adjust er_end.
         *
         *      4. (start > er_start) && (end < er_end):
         *         Fragment region.  Requires descriptor alloc.
         *
         * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
         * is not set.
         */
        LIST_FOREACH(rp, &ex->ex_regions, er_link) {
                /*
                 * Save ourselves some comparisons; does the current
                 * region end before the chunk to be freed begins?  If so,
                 * then we haven't found the appropriate region descriptor.
                 */
                if (rp->er_end < start)
                        continue;

                /*
                 * Save ourselves some traversal; does the current
                 * region begin after the chunk to be freed ends?  If so,
                 * then we've already passed any possible region descriptors
                 * that might have contained the chunk to be freed.
                 */
                if (rp->er_start > end)
                        break;

                /* Case 1. */
                if ((start == rp->er_start) && (end == rp->er_end)) {
                        LIST_REMOVE(rp, er_link);
                        extent_free_region_descriptor(ex, rp);
                        goto done;
                }

                /*
                 * The following cases all require that EXF_NOCOALESCE
                 * is not set.
                 */
                if (ex->ex_flags & EXF_NOCOALESCE)
                        continue;

                /* Case 2. */
                if ((start == rp->er_start) && (end < rp->er_end)) {
                        rp->er_start = (end + 1);
                        goto done;
                }

                /* Case 3. */
                if ((start > rp->er_start) && (end == rp->er_end)) {
                        rp->er_end = (start - 1);
                        goto done;
                }

                /* Case 4. */
                if ((start > rp->er_start) && (end < rp->er_end)) {
                        /* Fill in new descriptor. */
                        nrp->er_start = end + 1;
                        nrp->er_end = rp->er_end;

                        /* Adjust current descriptor. */
                        rp->er_end = start - 1;

                        /* Insert new descriptor after current. */
                        LIST_INSERT_AFTER(rp, nrp, er_link);

                        /* We used the new descriptor, so don't free it below */
                        nrp = NULL;
                        goto done;
                }
        }

        /* Region not found, or request otherwise invalid. */
        simple_unlock(&ex->ex_slock);
        extent_print(ex);
        printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
        panic("extent_free: region not found");

 done:
        if (nrp != NULL)
                extent_free_region_descriptor(ex, nrp);
        if (ex->ex_flags & EXF_WANTED) {
                ex->ex_flags &= ~EXF_WANTED;
                wakeup(ex);
        }
        simple_unlock(&ex->ex_slock);
        return (0);
}
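
/*
 * Example (an illustrative sketch; "ex" is a hypothetical extent):
 * freeing the middle of an allocated region exercises case 4 above
 * and splits one descriptor into two.
 */
#if 0
/* With [0x1000, 0x1fff] allocated, freeing [0x1400, 0x17ff] leaves
 * [0x1000, 0x13ff] and [0x1800, 0x1fff] allocated.  This requires
 * that EXF_NOCOALESCE is clear, so a descriptor can be allocated
 * for the tail fragment. */
extent_free(ex, 0x1400, 0x400, EX_NOWAIT);
#endif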

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
        struct extent *ex;
        int flags;
{
        struct extent_region *rp;
        int exflags;
        int s;

        /*
         * If the kernel memory allocator is not yet running, we can't
         * use it (obviously).
         */
        if (KMEM_IS_RUNNING == 0)
                flags &= ~EX_MALLOCOK;

        /*
         * XXX Make a static, create-time flags word, so we don't
         * XXX have to lock to read it!
         */
        simple_lock(&ex->ex_slock);
        exflags = ex->ex_flags;
        simple_unlock(&ex->ex_slock);

        if (exflags & EXF_FIXED) {
                struct extent_fixed *fex = (struct extent_fixed *)ex;

                for (;;) {
                        simple_lock(&ex->ex_slock);
                        if ((rp = LIST_FIRST(&fex->fex_freelist)) != NULL) {
                                /*
                                 * Don't muck with flags after pulling it off
                                 * the freelist; it may have been dynamically
                                 * allocated, and kindly given to us.  We
                                 * need to remember that information.
                                 */
                                LIST_REMOVE(rp, er_link);
                                simple_unlock(&ex->ex_slock);
                                return (rp);
                        }
                        if (flags & EX_MALLOCOK) {
                                simple_unlock(&ex->ex_slock);
                                goto alloc;
                        }
                        if ((flags & EX_WAITOK) == 0) {
                                simple_unlock(&ex->ex_slock);
                                return (NULL);
                        }
                        ex->ex_flags |= EXF_FLWANTED;
                        if (ltsleep(&fex->fex_freelist,
                            PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
                            "extnt", 0, &ex->ex_slock))
                                return (NULL);
                }
        }

 alloc:
        s = splhigh();
        if (expool_initialized == 0)
                expool_init();
        rp = pool_get(&expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
        splx(s);

        if (rp != NULL)
                rp->er_flags = ER_ALLOC;

        return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
        struct extent *ex;
        struct extent_region *rp;
{
        int s;

        if (ex->ex_flags & EXF_FIXED) {
                struct extent_fixed *fex = (struct extent_fixed *)ex;

                /*
                 * If someone's waiting for a region descriptor,
                 * be nice and give them this one, rather than
                 * just free'ing it back to the system.
                 */
                if (rp->er_flags & ER_ALLOC) {
                        if (ex->ex_flags & EXF_FLWANTED) {
                                /* Clear all but ER_ALLOC flag. */
                                rp->er_flags = ER_ALLOC;
                                LIST_INSERT_HEAD(&fex->fex_freelist, rp,
                                    er_link);
                                goto wake_em_up;
                        } else {
                                s = splhigh();
                                pool_put(&expool, rp);
                                splx(s);
                        }
                } else {
                        /* Clear all flags. */
                        rp->er_flags = 0;
                        LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
                }

                if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
                        ex->ex_flags &= ~EXF_FLWANTED;
                        wakeup(&fex->fex_freelist);
                }
                return;
        }

        /*
         * We know it's dynamically allocated if we get here.
         */
        s = splhigh();
        pool_put(&expool, rp);
        splx(s);
}

void
extent_print(ex)
        struct extent *ex;
{
        struct extent_region *rp;

        if (ex == NULL)
                panic("extent_print: NULL extent");

        simple_lock(&ex->ex_slock);

        printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
            ex->ex_start, ex->ex_end, ex->ex_flags);

        LIST_FOREACH(rp, &ex->ex_regions, er_link)
                printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

        simple_unlock(&ex->ex_slock);
}
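
/*
 * Sample output (illustrative, for a hypothetical "ioport" extent
 * with two allocated regions):
 *
 *      extent `ioport' (0x0 - 0xffff), flags = 0x0
 *           0x2f8 - 0x2ff
 *           0x3f8 - 0x3ff
 */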
