FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_rman.c


/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most resources are of this
 * type.  The second type is called a gauge (RMAN_GAUGE), and models
 * fungible resources (i.e., resources in which each instance is
 * indistinguishable from every other instance).  The principal
 * anticipated application of gauges is in the context of power
 * consumption, where a bus may have a specific power budget which all
 * attached devices share.  RMAN_GAUGE is not implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */

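/*
 * Example (illustrative sketch; the names and ranges below are
 * hypothetical, not taken from this file): a bus driver typically owns
 * a struct rman describing a whole address space and carves child
 * allocations out of it:
 *
 *	static struct rman io_rman;
 *
 *	io_rman.rm_type = RMAN_ARRAY;
 *	io_rman.rm_descr = "I/O ports";
 *	if (rman_init(&io_rman) != 0 ||
 *	    rman_manage_region(&io_rman, 0x0, 0xffff) != 0)
 *		panic("io_rman setup failed");
 *
 * Children then obtain pieces of the managed region with
 * rman_reserve_resource() and hand them back with
 * rman_release_resource(); rman_fini() tears the manager down once
 * every piece has been released.
 */
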
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>            /* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have a
 * wide integer type, rman_res_t (historically `unsigned long', the
 * largest integral type in ISO C90), so that very large resources
 * (for example, 36-bit physical addresses on 32-bit hardware) remain
 * representable.
 */
struct resource_i {
        struct resource         r_r;
        TAILQ_ENTRY(resource_i) r_link;
        LIST_ENTRY(resource_i)  r_sharelink;
        LIST_HEAD(, resource_i) *r_sharehead;
        rman_res_t      r_start;        /* index of the first entry in this resource */
        rman_res_t      r_end;          /* index of the last entry (inclusive) */
        u_int   r_flags;
        void    *r_virtual;     /* virtual address of this resource */
        void    *r_irq_cookie;  /* interrupt cookie for this (interrupt) resource */
        device_t r_dev; /* device which has allocated this resource */
        struct rman *r_rm;      /* resource manager from whence this came */
        int     r_rid;          /* optional rid for this resource. */
};

static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");

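/*
 * Note: rman_debug is both a loader tunable and a read-write sysctl
 * (debug.rman_debug); setting it nonzero enables the DPRINTF()
 * tracing used throughout this file.
 */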
#define DPRINTF(params) do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx; /* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
        struct resource_i *r;

        r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
        if (r != NULL) {
                r->r_r.__r_i = r;
        }
        return (r);
}

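/*
 * Note: consumers only ever see the small public struct resource
 * embedded above as r_r; its __r_i member points back at the enclosing
 * resource_i, which is why int_alloc_resource() sets r->r_r.__r_i = r.
 * The accessors near the bottom of this file simply follow that
 * pointer.
 */
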
int
rman_init(struct rman *rm)
{
        static int once = 0;

        if (once == 0) {
                once = 1;
                TAILQ_INIT(&rman_head);
                mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
        }

        if (rm->rm_start == 0 && rm->rm_end == 0)
                rm->rm_end = ~0;
        if (rm->rm_type == RMAN_UNINIT)
                panic("rman_init");
        if (rm->rm_type == RMAN_GAUGE)
                panic("implement RMAN_GAUGE");

        TAILQ_INIT(&rm->rm_list);
        rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
        if (rm->rm_mtx == NULL)
                return ENOMEM;
        mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

        mtx_lock(&rman_mtx);
        TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
        mtx_unlock(&rman_mtx);
        return 0;
}

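/*
 * Note: callers must set rm_type (and normally rm_descr) before
 * calling rman_init(); leaving rm_start and rm_end both zero selects
 * the full [0, RM_MAX_END] range.  A hypothetical manager restricted
 * to a 32-bit window would instead do:
 *
 *	mem_rman.rm_start = 0;
 *	mem_rman.rm_end = 0xffffffff;
 *	mem_rman.rm_type = RMAN_ARRAY;
 *	mem_rman.rm_descr = "32-bit memory window";
 *	error = rman_init(&mem_rman);
 *
 * Since rm_end is nonzero, the full-range default above does not
 * apply.
 */
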
int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
        struct resource_i *r, *s, *t;
        int rv = 0;

        DPRINTF(("rman_manage_region: <%s> request: start %#jx, end %#jx\n",
            rm->rm_descr, start, end));
        if (start < rm->rm_start || end > rm->rm_end)
                return EINVAL;
        r = int_alloc_resource(M_NOWAIT);
        if (r == NULL)
                return ENOMEM;
        r->r_start = start;
        r->r_end = end;
        r->r_rm = rm;

        mtx_lock(rm->rm_mtx);

        /* Skip entries before us. */
        TAILQ_FOREACH(s, &rm->rm_list, r_link) {
                if (s->r_end == ~0)
                        break;
                if (s->r_end + 1 >= r->r_start)
                        break;
        }

        /* If we ran off the end of the list, insert at the tail. */
        if (s == NULL) {
                TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
        } else {
                /* Check for any overlap with the current region. */
                if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
                        rv = EBUSY;
                        goto out;
                }

                /* Check for any overlap with the next region. */
                t = TAILQ_NEXT(s, r_link);
                if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
                        rv = EBUSY;
                        goto out;
                }

                /*
                 * See if this region can be merged with the next region.  If
                 * not, clear the pointer.
                 */
                if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
                        t = NULL;

                /* See if we can merge with the current region. */
                if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
                        /* Can we merge all 3 regions? */
                        if (t != NULL) {
                                s->r_end = t->r_end;
                                TAILQ_REMOVE(&rm->rm_list, t, r_link);
                                free(r, M_RMAN);
                                free(t, M_RMAN);
                        } else {
                                s->r_end = r->r_end;
                                free(r, M_RMAN);
                        }
                } else if (t != NULL) {
                        /* Can we merge with just the next region? */
                        t->r_start = r->r_start;
                        free(r, M_RMAN);
                } else if (s->r_end < r->r_start) {
                        TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
                } else {
                        TAILQ_INSERT_BEFORE(s, r, r_link);
                }
        }
out:
        mtx_unlock(rm->rm_mtx);
        return rv;
}

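/*
 * Example: adjacent free regions handed to the same manager coalesce.
 * After
 *
 *	rman_manage_region(rm, 0x0000, 0x0fff);
 *	rman_manage_region(rm, 0x1000, 0x1fff);
 *
 * the list holds a single free segment [0x0000, 0x1fff], while a
 * request overlapping an already-managed segment fails with EBUSY.
 */
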
int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
        int rv;

        if ((rv = rman_init(rm)) != 0)
                return (rv);
        return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
        struct resource_i *r;

        mtx_lock(rm->rm_mtx);
        TAILQ_FOREACH(r, &rm->rm_list, r_link) {
                if (r->r_flags & RF_ALLOCATED) {
                        mtx_unlock(rm->rm_mtx);
                        return EBUSY;
                }
        }

        /*
         * There really should only be one of these if we are in this
         * state and the code is working properly, but it can't hurt.
         */
        while (!TAILQ_EMPTY(&rm->rm_list)) {
                r = TAILQ_FIRST(&rm->rm_list);
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
                free(r, M_RMAN);
        }
        mtx_unlock(rm->rm_mtx);
        mtx_lock(&rman_mtx);
        TAILQ_REMOVE(&rman_head, rm, rm_link);
        mtx_unlock(&rman_mtx);
        mtx_destroy(rm->rm_mtx);
        free(rm->rm_mtx, M_RMAN);

        return 0;
}

int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
        struct resource_i *r;

        mtx_lock(rm->rm_mtx);
        TAILQ_FOREACH(r, &rm->rm_list, r_link) {
                if (!(r->r_flags & RF_ALLOCATED)) {
                        *start = r->r_start;
                        *end = r->r_end;
                        mtx_unlock(rm->rm_mtx);
                        return (0);
                }
        }
        mtx_unlock(rm->rm_mtx);
        return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
        struct resource_i *r;

        mtx_lock(rm->rm_mtx);
        TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
                if (!(r->r_flags & RF_ALLOCATED)) {
                        *start = r->r_start;
                        *end = r->r_end;
                        mtx_unlock(rm->rm_mtx);
                        return (0);
                }
        }
        mtx_unlock(rm->rm_mtx);
        return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
        struct resource_i *r, *s, *t, *new;
        struct rman *rm;

        /* Not supported for shared resources. */
        r = rr->__r_i;
        if (r->r_flags & RF_SHAREABLE)
                return (EINVAL);

        /*
         * This does not support wholesale moving of a resource.  At
         * least part of the desired new range must overlap with the
         * existing resource.
         */
        if (end < r->r_start || r->r_end < start)
                return (EINVAL);

        /*
         * Find the two resource regions immediately adjacent to the
         * allocated resource.
         */
        rm = r->r_rm;
        mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
        TAILQ_FOREACH(s, &rm->rm_list, r_link) {
                if (s == r)
                        break;
        }
        if (s == NULL)
                panic("resource not in list");
#endif
        s = TAILQ_PREV(r, resource_head, r_link);
        t = TAILQ_NEXT(r, r_link);
        KASSERT(s == NULL || s->r_end + 1 == r->r_start,
            ("prev resource mismatch"));
        KASSERT(t == NULL || r->r_end + 1 == t->r_start,
            ("next resource mismatch"));

        /*
         * See if the changes are permitted.  Shrinking is always allowed,
         * but growing requires sufficient room in the adjacent region.
         */
        if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
            s->r_start > start)) {
                mtx_unlock(rm->rm_mtx);
                return (EBUSY);
        }
        if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
            t->r_end < end)) {
                mtx_unlock(rm->rm_mtx);
                return (EBUSY);
        }

        /*
         * While holding the lock, grow either end of the resource as
         * needed and shrink either end if the shrinking does not require
         * allocating a new resource.  We can safely drop the lock and then
         * insert a new range to handle the shrinking case afterwards.
         */
        if (start < r->r_start ||
            (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
                KASSERT(s->r_flags == 0, ("prev is busy"));
                r->r_start = start;
                if (s->r_start == start) {
                        TAILQ_REMOVE(&rm->rm_list, s, r_link);
                        free(s, M_RMAN);
                } else
                        s->r_end = start - 1;
        }
        if (end > r->r_end ||
            (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
                KASSERT(t->r_flags == 0, ("next is busy"));
                r->r_end = end;
                if (t->r_end == end) {
                        TAILQ_REMOVE(&rm->rm_list, t, r_link);
                        free(t, M_RMAN);
                } else
                        t->r_start = end + 1;
        }
        mtx_unlock(rm->rm_mtx);

        /*
         * Handle the shrinking cases that require allocating a new
         * resource to hold the newly-free region.  We have to recheck
         * if we still need this new region after acquiring the lock.
         */
        if (start > r->r_start) {
                new = int_alloc_resource(M_WAITOK);
                new->r_start = r->r_start;
                new->r_end = start - 1;
                new->r_rm = rm;
                mtx_lock(rm->rm_mtx);
                r->r_start = start;
                s = TAILQ_PREV(r, resource_head, r_link);
                if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
                        s->r_end = start - 1;
                        free(new, M_RMAN);
                } else
                        TAILQ_INSERT_BEFORE(r, new, r_link);
                mtx_unlock(rm->rm_mtx);
        }
        if (end < r->r_end) {
                new = int_alloc_resource(M_WAITOK);
                new->r_start = end + 1;
                new->r_end = r->r_end;
                new->r_rm = rm;
                mtx_lock(rm->rm_mtx);
                r->r_end = end;
                t = TAILQ_NEXT(r, r_link);
                if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
                        t->r_start = end + 1;
                        free(new, M_RMAN);
                } else
                        TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
                mtx_unlock(rm->rm_mtx);
        }
        return (0);
}

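/*
 * Example (hypothetical values): given an allocated resource spanning
 * [0x100, 0x1ff] with free space above it,
 *
 *	error = rman_adjust_resource(res, 0x100, 0x2ff);
 *
 * grows the top end into the free neighbor.  The same call returns
 * EBUSY if [0x200, 0x2ff] is already allocated, and EINVAL if the
 * resource is shared or the new range does not overlap the old one.
 */
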
#define SHARE_TYPE(f)   (f & (RF_SHAREABLE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
                            rman_res_t count, rman_res_t bound, u_int flags,
                            device_t dev)
{
        u_int new_rflags;
        struct resource_i *r, *s, *rv;
        rman_res_t rstart, rend, amask, bmask;

        rv = NULL;

        DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#jx, %#jx], "
               "length %#jx, flags %x, device %s\n", rm->rm_descr, start, end,
               count, flags,
               dev == NULL ? "<null>" : device_get_nameunit(dev)));
        KASSERT(count != 0, ("%s: attempted to allocate an empty range",
            __func__));
        KASSERT((flags & RF_FIRSTSHARE) == 0,
            ("invalid flags %#x", flags));
        new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

        mtx_lock(rm->rm_mtx);

        r = TAILQ_FIRST(&rm->rm_list);
        if (r == NULL) {
                DPRINTF(("NULL list head\n"));
        } else {
                DPRINTF(("rman_reserve_resource_bound: trying %#jx <%#jx,%#jx>\n",
                        r->r_end, start, count-1));
        }
        for (r = TAILQ_FIRST(&rm->rm_list);
             r && r->r_end < start + count - 1;
             r = TAILQ_NEXT(r, r_link)) {
                DPRINTF(("rman_reserve_resource_bound: tried %#jx <%#jx,%#jx>\n",
                        r->r_end, start, count-1));
        }

        if (r == NULL) {
                DPRINTF(("could not find a region\n"));
                goto out;
        }

        amask = (1ull << RF_ALIGNMENT(flags)) - 1;
        KASSERT(start <= RM_MAX_END - amask,
            ("start (%#jx) + amask (%#jx) would wrap around", start, amask));

        /* If bound is 0, bmask will also be 0 */
        bmask = ~(bound - 1);
        /*
         * First try to find an acceptable totally-unshared region.
         */
        for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
                DPRINTF(("considering [%#jx, %#jx]\n", s->r_start, s->r_end));
                /*
                 * The resource list is sorted, so there is no point in
                 * searching further once r_start is too large.
                 */
                if (s->r_start > end - (count - 1)) {
                        DPRINTF(("s->r_start (%#jx) + count - 1 > end (%#jx)\n",
                            s->r_start, end));
                        break;
                }
                if (s->r_start > RM_MAX_END - amask) {
                        DPRINTF(("s->r_start (%#jx) + amask (%#jx) too large\n",
                            s->r_start, amask));
                        break;
                }
                if (s->r_flags & RF_ALLOCATED) {
                        DPRINTF(("region is allocated\n"));
                        continue;
                }
                rstart = ummax(s->r_start, start);
                /*
                 * Try to find a region by adjusting to boundary and alignment
                 * until both conditions are satisfied. This is not an optimal
                 * algorithm, but in most cases it isn't really bad, either.
                 */
                do {
                        rstart = (rstart + amask) & ~amask;
                        if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
                                rstart += bound - (rstart & ~bmask);
                } while ((rstart & amask) != 0 && rstart < end &&
                    rstart < s->r_end);
                rend = ummin(s->r_end, ummax(rstart + count - 1, end));
                if (rstart > rend) {
                        DPRINTF(("adjusted start exceeds end\n"));
                        continue;
                }
                DPRINTF(("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
                       rstart, rend, (rend - rstart + 1), count));

                if ((rend - rstart) >= (count - 1)) {
                        DPRINTF(("candidate region: [%#jx, %#jx], size %#jx\n",
                               rstart, rend, (rend - rstart + 1)));
                        if ((s->r_end - s->r_start + 1) == count) {
                                DPRINTF(("candidate region is entire chunk\n"));
                                rv = s;
                                rv->r_flags = new_rflags;
                                rv->r_dev = dev;
                                goto out;
                        }

                        /*
                         * If s->r_start < rstart and
                         *    s->r_end > rstart + count - 1, then
                         * we need to split the region into three pieces
                         * (the middle one will get returned to the user).
                         * Otherwise, we are allocating at either the
                         * beginning or the end of s, so we only need to
                         * split it in two.  The first case requires
                         * two new allocations; the second requires but one.
                         */
                        rv = int_alloc_resource(M_NOWAIT);
                        if (rv == NULL)
                                goto out;
                        rv->r_start = rstart;
                        rv->r_end = rstart + count - 1;
                        rv->r_flags = new_rflags;
                        rv->r_dev = dev;
                        rv->r_rm = rm;

                        if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
                                DPRINTF(("splitting region in three parts: "
                                       "[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
                                       s->r_start, rv->r_start - 1,
                                       rv->r_start, rv->r_end,
                                       rv->r_end + 1, s->r_end));
                                /*
                                 * We are allocating in the middle.
                                 */
                                r = int_alloc_resource(M_NOWAIT);
                                if (r == NULL) {
                                        free(rv, M_RMAN);
                                        rv = NULL;
                                        goto out;
                                }
                                r->r_start = rv->r_end + 1;
                                r->r_end = s->r_end;
                                r->r_flags = s->r_flags;
                                r->r_rm = rm;
                                s->r_end = rv->r_start - 1;
                                TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
                                                     r_link);
                                TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
                                                     r_link);
                        } else if (s->r_start == rv->r_start) {
                                DPRINTF(("allocating from the beginning\n"));
                                /*
                                 * We are allocating at the beginning.
                                 */
                                s->r_start = rv->r_end + 1;
                                TAILQ_INSERT_BEFORE(s, rv, r_link);
                        } else {
                                DPRINTF(("allocating at the end\n"));
                                /*
                                 * We are allocating at the end.
                                 */
                                s->r_end = rv->r_start - 1;
                                TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
                                                     r_link);
                        }
                        goto out;
                }
        }

        /*
         * Now find an acceptable shared region, if the client's requirements
         * allow sharing.  By our implementation restriction, a candidate
         * region must match exactly by both size and sharing type in order
         * to be considered compatible with the client's request.  (The
         * former restriction could probably be lifted without too much
         * additional work, but this does not seem warranted.)
         */
        DPRINTF(("no unshared regions found\n"));
        if ((flags & RF_SHAREABLE) == 0)
                goto out;

        for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
                if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
                    s->r_start >= start &&
                    (s->r_end - s->r_start + 1) == count &&
                    (s->r_start & amask) == 0 &&
                    ((s->r_start ^ s->r_end) & bmask) == 0) {
                        rv = int_alloc_resource(M_NOWAIT);
                        if (rv == NULL)
                                goto out;
                        rv->r_start = s->r_start;
                        rv->r_end = s->r_end;
                        rv->r_flags = new_rflags;
                        rv->r_dev = dev;
                        rv->r_rm = rm;
                        if (s->r_sharehead == NULL) {
                                s->r_sharehead = malloc(sizeof *s->r_sharehead,
                                                M_RMAN, M_NOWAIT | M_ZERO);
                                if (s->r_sharehead == NULL) {
                                        free(rv, M_RMAN);
                                        rv = NULL;
                                        goto out;
                                }
                                LIST_INIT(s->r_sharehead);
                                LIST_INSERT_HEAD(s->r_sharehead, s,
                                                 r_sharelink);
                                s->r_flags |= RF_FIRSTSHARE;
                        }
                        rv->r_sharehead = s->r_sharehead;
                        LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
                        goto out;
                }
        }
        /*
         * We couldn't find anything.
         */

out:
        mtx_unlock(rm->rm_mtx);
        return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
                      rman_res_t count, u_int flags, device_t dev)
{

        return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
            dev));
}

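/*
 * Example (hypothetical values): a caller wanting a naturally-aligned
 * 4KB region anywhere in the manager's range might use
 *
 *	flags = rman_make_alignment_flags(0x1000);
 *	res = rman_reserve_resource(rm, 0, RM_MAX_END, 0x1000,
 *	    flags, dev);
 *
 * rman_reserve_resource_bound() additionally takes a power-of-two
 * `bound' that the allocated range must not cross.
 */
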
int
rman_activate_resource(struct resource *re)
{
        struct resource_i *r;
        struct rman *rm;

        r = re->__r_i;
        rm = r->r_rm;
        mtx_lock(rm->rm_mtx);
        r->r_flags |= RF_ACTIVE;
        mtx_unlock(rm->rm_mtx);
        return 0;
}

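/*
 * Note: activation here is pure bookkeeping; setting RF_ACTIVE does
 * not map anything.  Bus drivers perform the actual bus_space mapping
 * and then record it via rman_set_bustag(), rman_set_bushandle() and
 * rman_set_virtual() below.
 */
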
int
rman_deactivate_resource(struct resource *r)
{
        struct rman *rm;

        rm = r->__r_i->r_rm;
        mtx_lock(rm->rm_mtx);
        r->__r_i->r_flags &= ~RF_ACTIVE;
        mtx_unlock(rm->rm_mtx);
        return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
        struct resource_i *s, *t;

        if (r->r_flags & RF_ACTIVE)
                r->r_flags &= ~RF_ACTIVE;

        /*
         * Check for a sharing list first.  If there is one, then we don't
         * have to think as hard.
         */
        if (r->r_sharehead) {
                /*
                 * If a sharing list exists, then we know there are at
                 * least two sharers.
                 *
                 * If we are in the main resource list, appoint someone else.
                 */
                LIST_REMOVE(r, r_sharelink);
                s = LIST_FIRST(r->r_sharehead);
                if (r->r_flags & RF_FIRSTSHARE) {
                        s->r_flags |= RF_FIRSTSHARE;
                        TAILQ_INSERT_BEFORE(r, s, r_link);
                        TAILQ_REMOVE(&rm->rm_list, r, r_link);
                }

                /*
                 * Make sure that the sharing list goes away completely
                 * if the resource is no longer being shared at all.
                 */
                if (LIST_NEXT(s, r_sharelink) == NULL) {
                        free(s->r_sharehead, M_RMAN);
                        s->r_sharehead = NULL;
                        s->r_flags &= ~RF_FIRSTSHARE;
                }
                goto out;
        }

        /*
         * Look at the adjacent resources in the list and see if our
         * segment can be merged with any of them.  If either of the
         * resources is allocated or is not exactly adjacent then they
         * cannot be merged with our segment.
         */
        s = TAILQ_PREV(r, resource_head, r_link);
        if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
            s->r_end + 1 != r->r_start))
                s = NULL;
        t = TAILQ_NEXT(r, r_link);
        if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
            r->r_end + 1 != t->r_start))
                t = NULL;

        if (s != NULL && t != NULL) {
                /*
                 * Merge all three segments.
                 */
                s->r_end = t->r_end;
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
                TAILQ_REMOVE(&rm->rm_list, t, r_link);
                free(t, M_RMAN);
        } else if (s != NULL) {
                /*
                 * Merge previous segment with ours.
                 */
                s->r_end = r->r_end;
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
        } else if (t != NULL) {
                /*
                 * Merge next segment with ours.
                 */
                t->r_start = r->r_start;
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
        } else {
                /*
                 * At this point, we know there is nothing we
                 * can potentially merge with, because on each
                 * side, there is either nothing there or what is
                 * there is still allocated.  In that case, we don't
                 * want to remove r from the list; we simply want to
                 * change it to an unallocated region and return
                 * without freeing anything.
                 */
                r->r_flags &= ~RF_ALLOCATED;
                r->r_dev = NULL;
                return 0;
        }

out:
        free(r, M_RMAN);
        return 0;
}

int
rman_release_resource(struct resource *re)
{
        int rv;
        struct resource_i *r;
        struct rman *rm;

        r = re->__r_i;
        rm = r->r_rm;
        mtx_lock(rm->rm_mtx);
        rv = int_rman_release_resource(rm, r);
        mtx_unlock(rm->rm_mtx);
        return (rv);
}

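/*
 * Note: for a resource reserved with RF_SHAREABLE, this only detaches
 * the calling consumer from the sharing list; the underlying region is
 * coalesced back into the free list only when the last sharer
 * releases it.
 */
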
uint32_t
rman_make_alignment_flags(uint32_t size)
{
        int i;

        /*
         * Find the highest bit set, and add one if more than one bit
         * is set.  We're effectively computing the ceil(log2(size)) here.
         */
        for (i = 31; i > 0; i--)
                if ((1 << i) & size)
                        break;
        if (~(1 << i) & size)
                i++;

        return (RF_ALIGNMENT_LOG2(i));
}

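/*
 * Example: for an exact power of two the result is its log2, otherwise
 * it rounds up.  rman_make_alignment_flags(0x1000) encodes an
 * alignment of 12 bits (4KB), while 0x1800, which has a second bit
 * set, encodes 13 bits (8KB).
 */
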
void
rman_set_start(struct resource *r, rman_res_t start)
{

        r->__r_i->r_start = start;
}

rman_res_t
rman_get_start(struct resource *r)
{

        return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, rman_res_t end)
{

        r->__r_i->r_end = end;
}

rman_res_t
rman_get_end(struct resource *r)
{

        return (r->__r_i->r_end);
}

rman_res_t
rman_get_size(struct resource *r)
{

        return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{

        return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

        r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{

        return (r->__r_i->r_virtual);
}

void
rman_set_irq_cookie(struct resource *r, void *c)
{

        r->__r_i->r_irq_cookie = c;
}

void *
rman_get_irq_cookie(struct resource *r)
{

        return (r->__r_i->r_irq_cookie);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

        r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{

        return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

        r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{

        return (r->r_bushandle);
}

void
rman_set_mapping(struct resource *r, struct resource_map *map)
{

        KASSERT(rman_get_size(r) == map->r_size,
            ("rman_set_mapping: size mismatch"));
        rman_set_bustag(r, map->r_bustag);
        rman_set_bushandle(r, map->r_bushandle);
        rman_set_virtual(r, map->r_vaddr);
}

void
rman_get_mapping(struct resource *r, struct resource_map *map)
{

        map->r_bustag = rman_get_bustag(r);
        map->r_bushandle = rman_get_bushandle(r);
        map->r_size = rman_get_size(r);
        map->r_vaddr = rman_get_virtual(r);
}

void
rman_set_rid(struct resource *r, int rid)
{

        r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{

        return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, device_t dev)
{

        r->__r_i->r_dev = dev;
}

device_t
rman_get_device(struct resource *r)
{

        return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

        return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take, after the bus generation number, two input parameters: the
 * index into the list of resource managers, and the resource offset
 * into the selected manager's list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
        int                     *name = (int *)arg1;
        u_int                   namelen = arg2;
        int                     rman_idx, res_idx;
        struct rman             *rm;
        struct resource_i       *res;
        struct resource_i       *sres;
        struct u_rman           urm;
        struct u_resource       ures;
        int                     error;

        if (namelen != 3)
                return (EINVAL);

        if (bus_data_generation_check(name[0]))
                return (EINVAL);
        rman_idx = name[1];
        res_idx = name[2];

        /*
         * Find the indexed resource manager.
         */
        mtx_lock(&rman_mtx);
        TAILQ_FOREACH(rm, &rman_head, rm_link) {
                if (rman_idx-- == 0)
                        break;
        }
        mtx_unlock(&rman_mtx);
        if (rm == NULL)
                return (ENOENT);

        /*
         * If the resource index is -1, we want details on the
         * resource manager.
         */
        if (res_idx == -1) {
                bzero(&urm, sizeof(urm));
                urm.rm_handle = (uintptr_t)rm;
                if (rm->rm_descr != NULL)
                        strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
                urm.rm_start = rm->rm_start;
                urm.rm_size = rm->rm_end - rm->rm_start + 1;
                urm.rm_type = rm->rm_type;

                error = SYSCTL_OUT(req, &urm, sizeof(urm));
                return (error);
        }

        /*
         * Find the indexed resource and return it.
         */
        mtx_lock(rm->rm_mtx);
        TAILQ_FOREACH(res, &rm->rm_list, r_link) {
                if (res->r_sharehead != NULL) {
                        LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
                                if (res_idx-- == 0) {
                                        res = sres;
                                        goto found;
                                }
                } else if (res_idx-- == 0)
                        goto found;
        }
        mtx_unlock(rm->rm_mtx);
        return (ENOENT);

found:
        bzero(&ures, sizeof(ures));
        ures.r_handle = (uintptr_t)res;
        ures.r_parent = (uintptr_t)res->r_rm;
        ures.r_device = (uintptr_t)res->r_dev;
        if (res->r_dev != NULL) {
                if (device_get_name(res->r_dev) != NULL) {
                        snprintf(ures.r_devname, RM_TEXTLEN,
                            "%s%d",
                            device_get_name(res->r_dev),
                            device_get_unit(res->r_dev));
                } else {
                        strlcpy(ures.r_devname, "nomatch",
                            RM_TEXTLEN);
                }
        } else {
                ures.r_devname[0] = '\0';
        }
        ures.r_start = res->r_start;
        ures.r_size = res->r_end - res->r_start + 1;
        ures.r_flags = res->r_flags;

        mtx_unlock(rm->rm_mtx);
        error = SYSCTL_OUT(req, &ures, sizeof(ures));
        return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");

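/*
 * Note (assumption about userland usage): consumers such as devinfo(8)
 * walk this node.  The OID is hw.bus.rman with three appended
 * integers: the bus generation, the resource-manager index, and the
 * resource index, with -1 for the latter fetching the struct u_rman
 * describing the manager itself.
 */
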
#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

        if (db_pager_quit)
                return;
        db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
            rm, rm->rm_descr, (rman_res_t)rm->rm_start, (rman_res_t)rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
        struct resource_i *r;
        const char *devname;

        if (db_pager_quit)
                return;
        TAILQ_FOREACH(r, &rm->rm_list, r_link) {
                if (r->r_dev != NULL) {
                        devname = device_get_nameunit(r->r_dev);
                        if (devname == NULL)
                                devname = "nomatch";
                } else
                        devname = NULL;
                db_printf("    0x%jx-0x%jx (RID=%d) ",
                    r->r_start, r->r_end, r->r_rid);
                if (devname != NULL)
                        db_printf("(%s)\n", devname);
                else
                        db_printf("----\n");
                if (db_pager_quit)
                        return;
        }
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

        if (have_addr) {
                dump_rman_header((struct rman *)addr);
                dump_rman((struct rman *)addr);
        }
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
        struct rman *rm;

        TAILQ_FOREACH(rm, &rman_head, rm_link) {
                dump_rman_header(rm);
        }
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
        struct rman *rm;

        TAILQ_FOREACH(rm, &rman_head, rm_link) {
                dump_rman_header(rm);
                dump_rman(rm);
        }
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif
