FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_rman.c

/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */

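/*
 * Editorial sketch (not part of the original file): how a hypothetical
 * bus driver would typically drive the API below.  The rman variable,
 * function name and numeric IRQ range are invented for the example;
 * the rman_*() calls themselves are the ones implemented in this file.
 */
#if 0
static struct rman examplebus_irq_rman;         /* hypothetical */

static int
examplebus_setup_rman(void)
{
        struct resource *res;

        examplebus_irq_rman.rm_type = RMAN_ARRAY;
        examplebus_irq_rman.rm_descr = "example IRQ lines";
        if (rman_init(&examplebus_irq_rman) != 0 ||
            rman_manage_region(&examplebus_irq_rman, 0, 15) != 0)
                return (ENXIO);

        /* Carve IRQ 5 out of the managed range and activate it. */
        res = rman_reserve_resource(&examplebus_irq_rman, 5, 5, 1,
            RF_ACTIVE, NULL);
        if (res == NULL)
                return (ENXIO);
        printf("reserved IRQ %lu\n", rman_get_start(res));
        rman_release_resource(res);
        return (0);
}
#endif
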
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.2/sys/kern/subr_rman.c 164994 2006-12-07 22:28:17Z jhb $");

#define __RMAN_RESOURCE_VISIBLE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>            /* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

int     rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params) if (rman_debug) printf params
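
/*
 * Editorial note: rman_debug can be enabled at boot via the loader
 * tunable above ("debug.rman_debug=1" in loader.conf(5)) or at run time
 * with "sysctl debug.rman_debug=1" to trace the decisions made below.
 */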

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct  rman_head rman_head;
static  struct mtx rman_mtx; /* mutex to protect rman_head */
static  int int_rman_activate_resource(struct rman *rm, struct resource *r,
                                       struct resource **whohas);
static  int int_rman_deactivate_resource(struct resource *r);
static  int int_rman_release_resource(struct rman *rm, struct resource *r);

int
rman_init(struct rman *rm)
{
        static int once = 0;

        if (once == 0) {
                once = 1;
                TAILQ_INIT(&rman_head);
                mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
        }

        if (rm->rm_type == RMAN_UNINIT)
                panic("rman_init");
        if (rm->rm_type == RMAN_GAUGE)
                panic("implement RMAN_GAUGE");

        TAILQ_INIT(&rm->rm_list);
        rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
        if (rm->rm_mtx == NULL)
                return ENOMEM;
        mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

        mtx_lock(&rman_mtx);
        TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
        mtx_unlock(&rman_mtx);
        return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
        struct resource *r, *s, *t;

        DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
            rm->rm_descr, start, end));
        r = malloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
        if (r == NULL)
                return ENOMEM;
        r->r_start = start;
        r->r_end = end;
        r->r_rm = rm;

        mtx_lock(rm->rm_mtx);

        /* Skip entries before us. */
        TAILQ_FOREACH(s, &rm->rm_list, r_link) {
                if (s->r_end == ULONG_MAX)
                        break;
                if (s->r_end + 1 >= r->r_start)
                        break;
        }

        /* If we ran off the end of the list, insert at the tail. */
        if (s == NULL) {
                TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
        } else {
                /* Check for any overlap with the current region. */
                if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
                        mtx_unlock(rm->rm_mtx);
                        free(r, M_RMAN);
                        return EBUSY;
                }

                /* Check for any overlap with the next region. */
                t = TAILQ_NEXT(s, r_link);
                if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
                        mtx_unlock(rm->rm_mtx);
                        free(r, M_RMAN);
                        return EBUSY;
                }

                /*
                 * See if this region can be merged with the next region.  If
                 * not, clear the pointer.
                 */
                if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
                        t = NULL;

                /* See if we can merge with the current region. */
                if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
                        /* Can we merge all 3 regions? */
                        if (t != NULL) {
                                s->r_end = t->r_end;
                                TAILQ_REMOVE(&rm->rm_list, t, r_link);
                                free(r, M_RMAN);
                                free(t, M_RMAN);
                        } else {
                                s->r_end = r->r_end;
                                free(r, M_RMAN);
                        }
                } else {
                        /* Can we merge with just the next region? */
                        if (t != NULL) {
                                t->r_start = r->r_start;
                                free(r, M_RMAN);
                        } else
                                TAILQ_INSERT_BEFORE(s, r, r_link);
                }
        }

        mtx_unlock(rm->rm_mtx);
        return 0;
}
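
/*
 * Editorial note: because rman_manage_region() coalesces adjacent free
 * regions above, managing [0x100, 0x1ff] and then [0x200, 0x2ff] leaves
 * a single free region [0x100, 0x2ff] on rm_list, exactly as if the
 * caller had managed the whole range at once.
 */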

int
rman_fini(struct rman *rm)
{
        struct resource *r;

        mtx_lock(rm->rm_mtx);
        TAILQ_FOREACH(r, &rm->rm_list, r_link) {
                if (r->r_flags & RF_ALLOCATED) {
                        mtx_unlock(rm->rm_mtx);
                        return EBUSY;
                }
        }

        /*
         * There really should only be one of these if we are in this
         * state and the code is working properly, but it can't hurt.
         */
        while (!TAILQ_EMPTY(&rm->rm_list)) {
                r = TAILQ_FIRST(&rm->rm_list);
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
                free(r, M_RMAN);
        }
        mtx_unlock(rm->rm_mtx);
        mtx_lock(&rman_mtx);
        TAILQ_REMOVE(&rman_head, rm, rm_link);
        mtx_unlock(&rman_mtx);
        mtx_destroy(rm->rm_mtx);
        free(rm->rm_mtx, M_RMAN);

        return 0;
}

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
                      u_long count, u_long bound, u_int flags,
                      struct device *dev)
{
        u_int   want_activate;
        struct  resource *r, *s, *rv;
        u_long  rstart, rend, amask, bmask;

        rv = NULL;

        DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
               "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count,
               flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
        want_activate = (flags & RF_ACTIVE);
        flags &= ~RF_ACTIVE;

        mtx_lock(rm->rm_mtx);

        for (r = TAILQ_FIRST(&rm->rm_list);
             r && r->r_end < start;
             r = TAILQ_NEXT(r, r_link))
                ;

        if (r == NULL) {
                DPRINTF(("could not find a region\n"));
                goto out;
        }

        amask = (1ul << RF_ALIGNMENT(flags)) - 1;
        /* If bound is 0, bmask will also be 0 */
        bmask = ~(bound - 1);
        /*
         * First try to find an acceptable totally-unshared region.
         */
        for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
                DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
                if (s->r_start + count - 1 > end) {
                        DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
                            s->r_start, end));
                        break;
                }
                if (s->r_flags & RF_ALLOCATED) {
                        DPRINTF(("region is allocated\n"));
                        continue;
                }
                rstart = ulmax(s->r_start, start);
                /*
                 * Try to find a region by adjusting to boundary and alignment
                 * until both conditions are satisfied. This is not an optimal
                 * algorithm, but in most cases it isn't really bad, either.
                 */
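                /*
                 * Worked example (editorial): with 16-byte alignment
                 * (amask = 0xf), bound = 0x100 (bmask = ~0xff) and
                 * count = 0x20, a candidate rstart of 0x11f0 is already
                 * aligned, but [0x11f0, 0x120f] would cross the 0x1200
                 * boundary, so the loop below bumps rstart by 0x10 to
                 * 0x1200, which satisfies both conditions.
                 */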
                do {
                        rstart = (rstart + amask) & ~amask;
                        if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
                                rstart += bound - (rstart & ~bmask);
                } while ((rstart & amask) != 0 && rstart < end &&
                    rstart < s->r_end);
                rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
                if (rstart > rend) {
                        DPRINTF(("adjusted start exceeds end\n"));
                        continue;
                }
                DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
                       rstart, rend, (rend - rstart + 1), count));

                if ((rend - rstart + 1) >= count) {
                        DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
                               rstart, rend, (rend - rstart + 1)));
                        if ((s->r_end - s->r_start + 1) == count) {
                                DPRINTF(("candidate region is entire chunk\n"));
                                rv = s;
                                rv->r_flags |= RF_ALLOCATED | flags;
                                rv->r_dev = dev;
                                goto out;
                        }

                        /*
                         * If s->r_start < rstart and
                         *    s->r_end > rstart + count - 1, then
                         * we need to split the region into three pieces
                         * (the middle one will get returned to the user).
                         * Otherwise, we are allocating at either the
                         * beginning or the end of s, so we only need to
                         * split it in two.  The first case requires
                         * two new allocations; the second requires but one.
                         */
                        rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
                        if (rv == NULL)
                                goto out;
                        rv->r_start = rstart;
                        rv->r_end = rstart + count - 1;
                        rv->r_flags = flags | RF_ALLOCATED;
                        rv->r_dev = dev;
                        rv->r_rm = rm;

                        if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
                                DPRINTF(("splitting region in three parts: "
                                       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
                                       s->r_start, rv->r_start - 1,
                                       rv->r_start, rv->r_end,
                                       rv->r_end + 1, s->r_end));
                                /*
                                 * We are allocating in the middle.
                                 */
                                r = malloc(sizeof *r, M_RMAN, M_NOWAIT|M_ZERO);
                                if (r == NULL) {
                                        free(rv, M_RMAN);
                                        rv = NULL;
                                        goto out;
                                }
                                r->r_start = rv->r_end + 1;
                                r->r_end = s->r_end;
                                r->r_flags = s->r_flags;
                                r->r_rm = rm;
                                s->r_end = rv->r_start - 1;
                                TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
                                                     r_link);
                                TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
                                                     r_link);
                        } else if (s->r_start == rv->r_start) {
                                DPRINTF(("allocating from the beginning\n"));
                                /*
                                 * We are allocating at the beginning.
                                 */
                                s->r_start = rv->r_end + 1;
                                TAILQ_INSERT_BEFORE(s, rv, r_link);
                        } else {
                                DPRINTF(("allocating at the end\n"));
                                /*
                                 * We are allocating at the end.
                                 */
                                s->r_end = rv->r_start - 1;
                                TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
                                                     r_link);
                        }
                        goto out;
                }
        }

        /*
         * Now find an acceptable shared region, if the client's requirements
         * allow sharing.  By our implementation restriction, a candidate
         * region must match exactly by both size and sharing type in order
         * to be considered compatible with the client's request.  (The
         * former restriction could probably be lifted without too much
         * additional work, but this does not seem warranted.)
         */
        DPRINTF(("no unshared regions found\n"));
        if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
                goto out;

        for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
                if (s->r_start > end)
                        break;
                if ((s->r_flags & flags) != flags)
                        continue;
                rstart = ulmax(s->r_start, start);
                rend = ulmin(s->r_end, ulmax(start + count - 1, end));
                if (s->r_start >= start && s->r_end <= end
                    && (s->r_end - s->r_start + 1) == count &&
                    (s->r_start & amask) == 0 &&
                    ((s->r_start ^ s->r_end) & bmask) == 0) {
                        rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
                        if (rv == NULL)
                                goto out;
                        rv->r_start = s->r_start;
                        rv->r_end = s->r_end;
                        rv->r_flags = s->r_flags &
                                (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
                        rv->r_dev = dev;
                        rv->r_rm = rm;
                        if (s->r_sharehead == NULL) {
                                s->r_sharehead = malloc(sizeof *s->r_sharehead,
                                                M_RMAN, M_NOWAIT | M_ZERO);
                                if (s->r_sharehead == NULL) {
                                        free(rv, M_RMAN);
                                        rv = NULL;
                                        goto out;
                                }
                                LIST_INIT(s->r_sharehead);
                                LIST_INSERT_HEAD(s->r_sharehead, s,
                                                 r_sharelink);
                                s->r_flags |= RF_FIRSTSHARE;
                        }
                        rv->r_sharehead = s->r_sharehead;
                        LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
                        goto out;
                }
        }

        /*
         * We couldn't find anything.
         */
out:
        /*
         * If the user specified RF_ACTIVE in the initial flags,
         * which is reflected in `want_activate', we attempt to atomically
         * activate the resource.  If this fails, we release the resource
         * and indicate overall failure.  (This behavior probably doesn't
         * make sense for RF_TIMESHARE-type resources.)
         */
        if (rv && want_activate) {
                struct resource *whohas;
                if (int_rman_activate_resource(rm, rv, &whohas)) {
                        int_rman_release_resource(rm, rv);
                        rv = NULL;
                }
        }

        mtx_unlock(rm->rm_mtx);
        return (rv);
}
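
/*
 * Editorial sketch (not part of the original file): reserving 0x100
 * ports of hypothetical I/O space, 0x100-aligned and not crossing a
 * 0x1000 boundary.  "examplebus_io_rman" is an invented rman assumed
 * to already manage the range.
 */
#if 0
        struct resource *res;

        res = rman_reserve_resource_bound(&examplebus_io_rman, 0, ~0ul,
            0x100, 0x1000, rman_make_alignment_flags(0x100), NULL);
        if (res != NULL)
                printf("I/O window at [%#lx, %#lx]\n",
                    rman_get_start(res), rman_get_end(res));
#endif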

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
                      u_int flags, struct device *dev)
{

        return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
            dev));
}

static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
                           struct resource **whohas)
{
        struct resource *s;
        int ok;

        /*
         * If we are not timesharing, then there is nothing much to do.
         * If we already have the resource, then there is nothing at all to do.
         * If we are not on a sharing list with anybody else, then there is
         * little to do.
         */
        if ((r->r_flags & RF_TIMESHARE) == 0
            || (r->r_flags & RF_ACTIVE) != 0
            || r->r_sharehead == NULL) {
                r->r_flags |= RF_ACTIVE;
                return 0;
        }

        ok = 1;
        for (s = LIST_FIRST(r->r_sharehead); s && ok;
             s = LIST_NEXT(s, r_sharelink)) {
                if ((s->r_flags & RF_ACTIVE) != 0) {
                        ok = 0;
                        *whohas = s;
                }
        }
        if (ok) {
                r->r_flags |= RF_ACTIVE;
                return 0;
        }
        return EBUSY;
}

int
rman_activate_resource(struct resource *r)
{
        int rv;
        struct resource *whohas;
        struct rman *rm;

        rm = r->r_rm;
        mtx_lock(rm->rm_mtx);
        rv = int_rman_activate_resource(rm, r, &whohas);
        mtx_unlock(rm->rm_mtx);
        return rv;
}

int
rman_await_resource(struct resource *r, int pri, int timo)
{
        int     rv;
        struct  resource *whohas;
        struct  rman *rm;

        rm = r->r_rm;
        mtx_lock(rm->rm_mtx);
        for (;;) {
                rv = int_rman_activate_resource(rm, r, &whohas);
                if (rv != EBUSY)
                        return (rv);    /* returns with mutex held */

                if (r->r_sharehead == NULL)
                        panic("rman_await_resource");
                whohas->r_flags |= RF_WANTED;
                rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
                if (rv) {
                        mtx_unlock(rm->rm_mtx);
                        return (rv);
                }
        }
}
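
/*
 * Editorial note: rman_await_resource() is the sleeping counterpart of
 * rman_activate_resource() for RF_TIMESHARE resources.  A failed
 * activation marks the current holder RF_WANTED and sleeps on the share
 * list head; int_rman_deactivate_resource() below issues the matching
 * wakeup().  Note the comment above: on a successful (non-EBUSY) return
 * the rman mutex is still held, and the caller is responsible for it.
 */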

static int
int_rman_deactivate_resource(struct resource *r)
{

        r->r_flags &= ~RF_ACTIVE;
        if (r->r_flags & RF_WANTED) {
                r->r_flags &= ~RF_WANTED;
                wakeup(r->r_sharehead);
        }
        return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
        struct  rman *rm;

        rm = r->r_rm;
        mtx_lock(rm->rm_mtx);
        int_rman_deactivate_resource(r);
        mtx_unlock(rm->rm_mtx);
        return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
        struct  resource *s, *t;

        if (r->r_flags & RF_ACTIVE)
                int_rman_deactivate_resource(r);

        /*
         * Check for a sharing list first.  If there is one, then we don't
         * have to think as hard.
         */
        if (r->r_sharehead) {
                /*
                 * If a sharing list exists, then we know there are at
                 * least two sharers.
                 *
                 * If we are in the main resource list, appoint someone else.
                 */
                LIST_REMOVE(r, r_sharelink);
                s = LIST_FIRST(r->r_sharehead);
                if (r->r_flags & RF_FIRSTSHARE) {
                        s->r_flags |= RF_FIRSTSHARE;
                        TAILQ_INSERT_BEFORE(r, s, r_link);
                        TAILQ_REMOVE(&rm->rm_list, r, r_link);
                }

                /*
                 * Make sure that the sharing list goes away completely
                 * if the resource is no longer being shared at all.
                 */
                if (LIST_NEXT(s, r_sharelink) == NULL) {
                        free(s->r_sharehead, M_RMAN);
                        s->r_sharehead = NULL;
                        s->r_flags &= ~RF_FIRSTSHARE;
                }
                goto out;
        }

        /*
         * Look at the adjacent resources in the list and see if our
         * segment can be merged with any of them.  If either of the
         * resources is allocated or is not exactly adjacent then they
         * cannot be merged with our segment.
         */
        s = TAILQ_PREV(r, resource_head, r_link);
        if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
            s->r_end + 1 != r->r_start))
                s = NULL;
        t = TAILQ_NEXT(r, r_link);
        if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
            r->r_end + 1 != t->r_start))
                t = NULL;

        if (s != NULL && t != NULL) {
                /*
                 * Merge all three segments.
                 */
                s->r_end = t->r_end;
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
                TAILQ_REMOVE(&rm->rm_list, t, r_link);
                free(t, M_RMAN);
        } else if (s != NULL) {
                /*
                 * Merge previous segment with ours.
                 */
                s->r_end = r->r_end;
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
        } else if (t != NULL) {
                /*
                 * Merge next segment with ours.
                 */
                t->r_start = r->r_start;
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
        } else {
                /*
                 * At this point, we know there is nothing we
                 * can potentially merge with, because on each
                 * side, there is either nothing there or what is
                 * there is still allocated.  In that case, we don't
                 * want to remove r from the list; we simply want to
                 * change it to an unallocated region and return
                 * without freeing anything.
                 */
                r->r_flags &= ~RF_ALLOCATED;
                return 0;
        }

out:
        free(r, M_RMAN);
        return 0;
}

int
rman_release_resource(struct resource *r)
{
        int     rv;
        struct  rman *rm = r->r_rm;

        mtx_lock(rm->rm_mtx);
        rv = int_rman_release_resource(rm, r);
        mtx_unlock(rm->rm_mtx);
        return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
        int     i;

        /*
         * Find the highest bit set, and add one if more than one bit
         * set.  We're effectively computing the ceil(log2(size)) here.
         */
        for (i = 31; i > 0; i--)
                if ((1 << i) & size)
                        break;
        if (~(1 << i) & size)
                i++;

        return (RF_ALIGNMENT_LOG2(i));
}
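
/*
 * Editorial examples: rman_make_alignment_flags(0x1000) encodes an
 * alignment of 2^12, since 0x1000 is an exact power of two, while
 * rman_make_alignment_flags(0x1800) rounds up to 2^13 because an
 * 0x1800-byte alignment cannot be represented as a single power of two.
 */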

u_long
rman_get_start(struct resource *r)
{
        return (r->r_start);
}

u_long
rman_get_end(struct resource *r)
{
        return (r->r_end);
}

u_long
rman_get_size(struct resource *r)
{
        return (r->r_end - r->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
        return (r->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
        r->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
        return (r->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
        r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
        return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
        r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
        return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
        r->r_rid = rid;
}

void
rman_set_start(struct resource *r, u_long start)
{
        r->r_start = start;
}

void
rman_set_end(struct resource *r, u_long end)
{
        r->r_end = end;
}

int
rman_get_rid(struct resource *r)
{
        return (r->r_rid);
}

struct device *
rman_get_device(struct resource *r)
{
        return (r->r_dev);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
        r->r_dev = dev;
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation, the index of
 * the resource manager in the global list, and the index of the resource
 * within that manager's list (-1 requests details on the manager itself).
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
        int                     *name = (int *)arg1;
        u_int                   namelen = arg2;
        int                     rman_idx, res_idx;
        struct rman             *rm;
        struct resource         *res;
        struct u_rman           urm;
        struct u_resource       ures;
        int                     error;

        if (namelen != 3)
                return (EINVAL);

        if (bus_data_generation_check(name[0]))
                return (EINVAL);
        rman_idx = name[1];
        res_idx = name[2];

        /*
         * Find the indexed resource manager
         */
        mtx_lock(&rman_mtx);
        TAILQ_FOREACH(rm, &rman_head, rm_link) {
                if (rman_idx-- == 0)
                        break;
        }
        mtx_unlock(&rman_mtx);
        if (rm == NULL)
                return (ENOENT);

        /*
         * If the resource index is -1, we want details on the
         * resource manager.
         */
        if (res_idx == -1) {
                bzero(&urm, sizeof(urm));
                urm.rm_handle = (uintptr_t)rm;
                strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
                urm.rm_start = rm->rm_start;
                urm.rm_size = rm->rm_end - rm->rm_start + 1;
                urm.rm_type = rm->rm_type;

                error = SYSCTL_OUT(req, &urm, sizeof(urm));
                return (error);
        }

        /*
         * Find the indexed resource and return it.
         */
        mtx_lock(&rman_mtx);
        TAILQ_FOREACH(res, &rm->rm_list, r_link) {
                if (res_idx-- == 0) {
                        bzero(&ures, sizeof(ures));
                        ures.r_handle = (uintptr_t)res;
                        ures.r_parent = (uintptr_t)res->r_rm;
                        ures.r_device = (uintptr_t)res->r_dev;
                        if (res->r_dev != NULL) {
                                if (device_get_name(res->r_dev) != NULL) {
                                        snprintf(ures.r_devname, RM_TEXTLEN,
                                            "%s%d",
                                            device_get_name(res->r_dev),
                                            device_get_unit(res->r_dev));
                                } else {
                                        strlcpy(ures.r_devname, "nomatch",
                                            RM_TEXTLEN);
                                }
                        } else {
                                ures.r_devname[0] = '\0';
                        }
                        ures.r_start = res->r_start;
                        ures.r_size = res->r_end - res->r_start + 1;
                        ures.r_flags = res->r_flags;

                        mtx_unlock(&rman_mtx);
                        error = SYSCTL_OUT(req, &ures, sizeof(ures));
                        return (error);
                }
        }
        mtx_unlock(&rman_mtx);
        return (ENOENT);
}
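
/*
 * Editorial sketch (not part of the original file): a userland reader
 * for the hw.bus.rman node below.  It assumes struct u_rman is visible
 * from <sys/rman.h> and that the current bus data generation has been
 * fetched beforehand (devinfo(8) reads it from hw.bus.info); the value
 * 0 used here is a placeholder for that generation.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/rman.h>
#include <stdio.h>

int
main(void)
{
        int mib[8];
        size_t miblen = 5, len;
        struct u_rman urm;

        if (sysctlnametomib("hw.bus.rman", mib, &miblen) != 0)
                return (1);
        mib[miblen + 0] = 0;    /* name[0]: bus data generation (placeholder) */
        mib[miblen + 1] = 0;    /* name[1]: first resource manager */
        mib[miblen + 2] = -1;   /* name[2]: -1 = describe the manager itself */
        len = sizeof(urm);
        if (sysctl(mib, miblen + 3, &urm, &len, NULL, 0) != 0)
                return (1);
        printf("%s: start %#lx, size %#lx\n",
            urm.rm_descr, (u_long)urm.rm_start, (u_long)urm.rm_size);
        return (0);
}
#endif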

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
