FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_rman.c


/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/5.1/sys/kern/subr_rman.c 110753 2003-02-12 07:00:59Z imp $
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
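
/*
 * Editorial sketch, not part of the original file: a minimal example of
 * how a hypothetical bus driver might set up an array-type rman.  The
 * "foo" names and the address window are invented for illustration.
 */
#if 0
static struct rman foo_mem_rman;

static int
foo_attach_rman(void)
{
        int error;

        foo_mem_rman.rm_type = RMAN_ARRAY;
        foo_mem_rman.rm_descr = "foo memory window";
        error = rman_init(&foo_mem_rman);
        if (error)
                return (error);
        /* Hand the rman an inclusive range of addresses to apportion. */
        return (rman_manage_region(&foo_mem_rman, 0xa0000000ul, 0xa00ffffful));
}
#endif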

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>            /* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

int     rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

/* Wrapped in do/while so that DPRINTF(...); parses as one statement. */
#define DPRINTF(params) do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct  rman_head rman_head;
static  struct mtx rman_mtx; /* mutex to protect rman_head */
static  int int_rman_activate_resource(struct rman *rm, struct resource *r,
                                       struct resource **whohas);
static  int int_rman_deactivate_resource(struct resource *r);
static  int int_rman_release_resource(struct rman *rm, struct resource *r);

int
rman_init(struct rman *rm)
{
        static int once;

        if (once == 0) {
                once = 1;
                TAILQ_INIT(&rman_head);
                mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
        }

        if (rm->rm_type == RMAN_UNINIT)
                panic("rman_init");
        if (rm->rm_type == RMAN_GAUGE)
                panic("implement RMAN_GAUGE");

        TAILQ_INIT(&rm->rm_list);
        rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
        if (rm->rm_mtx == NULL)
                return ENOMEM;
        mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

        mtx_lock(&rman_mtx);
        TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
        mtx_unlock(&rman_mtx);
        return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
        struct resource *r, *s;

        r = malloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
        if (r == NULL)
                return ENOMEM;
        r->r_start = start;
        r->r_end = end;
        r->r_rm = rm;

        mtx_lock(rm->rm_mtx);
        /* Keep the region list sorted in ascending address order. */
        for (s = TAILQ_FIRST(&rm->rm_list);
             s && s->r_end < r->r_start;
             s = TAILQ_NEXT(s, r_link))
                ;

        if (s == NULL) {
                TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
        } else {
                TAILQ_INSERT_BEFORE(s, r, r_link);
        }

        mtx_unlock(rm->rm_mtx);
        return 0;
}
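
/*
 * Editorial sketch, not part of the original file: regions need not be
 * contiguous.  A hypothetical host bridge driver could hand several
 * disjoint windows to one rman; the loop above keeps the list sorted by
 * starting address.  Per the NB above, nothing catches a region that is
 * added twice.
 */
#if 0
        error = rman_manage_region(&foo_mem_rman, 0x000a0000ul, 0x000bfffful);
        if (error == 0)
                error = rman_manage_region(&foo_mem_rman, 0xd0000000ul,
                    0xdffffffful);
#endif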

int
rman_fini(struct rman *rm)
{
        struct resource *r;

        mtx_lock(rm->rm_mtx);
        TAILQ_FOREACH(r, &rm->rm_list, r_link) {
                if (r->r_flags & RF_ALLOCATED) {
                        mtx_unlock(rm->rm_mtx);
                        return EBUSY;
                }
        }

        /*
         * There really should only be one of these if we are in this
         * state and the code is working properly, but it can't hurt.
         */
        while (!TAILQ_EMPTY(&rm->rm_list)) {
                r = TAILQ_FIRST(&rm->rm_list);
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
                free(r, M_RMAN);
        }
        mtx_unlock(rm->rm_mtx);
        mtx_lock(&rman_mtx);
        TAILQ_REMOVE(&rman_head, rm, rm_link);
        mtx_unlock(&rman_mtx);
        mtx_destroy(rm->rm_mtx);
        free(rm->rm_mtx, M_RMAN);

        return 0;
}

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
                      u_long count, u_long bound, u_int flags,
                      struct device *dev)
{
        u_int   want_activate;
        struct  resource *r, *s, *rv;
        u_long  rstart, rend, amask, bmask;

        rv = NULL;

        DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
               "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count,
               flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
        want_activate = (flags & RF_ACTIVE);
        flags &= ~RF_ACTIVE;

        mtx_lock(rm->rm_mtx);

        for (r = TAILQ_FIRST(&rm->rm_list);
             r && r->r_end < start;
             r = TAILQ_NEXT(r, r_link))
                ;

        if (r == NULL) {
                DPRINTF(("could not find a region\n"));
                goto out;
        }

        amask = (1ul << RF_ALIGNMENT(flags)) - 1;
        /* If bound is 0, bmask will also be 0. */
        bmask = ~(bound - 1);
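        /*
         * Editorial note: with bound = 0x100, bmask = ~0xfful, so the
         * boundary test below flags any candidate range whose first and
         * last addresses differ above the low 8 bits, i.e. one that
         * crosses a 0x100 boundary.  With bound = 0, bound - 1 wraps to
         * ~0ul and bmask = 0, so the boundary test never fires.
         */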
        /*
         * First try to find an acceptable totally-unshared region.
         */
        for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
                DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
                if (s->r_start > end) {
                        DPRINTF(("s->r_start (%#lx) > end (%#lx)\n", s->r_start, end));
                        break;
                }
                if (s->r_flags & RF_ALLOCATED) {
                        DPRINTF(("region is allocated\n"));
                        continue;
                }
                rstart = ulmax(s->r_start, start);
                /*
                 * Try to find a region by adjusting to boundary and alignment
                 * until both conditions are satisfied.  This is not an optimal
                 * algorithm, but in most cases it isn't really bad, either.
                 */
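                /*
                 * Editorial note, a worked pass through the loop: with
                 * rstart = 0x10f1, amask = 0xf (16-byte alignment),
                 * count = 0x20 and bound = 0x100, the alignment step
                 * rounds rstart up to 0x1100, and [0x1100, 0x111f]
                 * stays within one 0x100 boundary window, so the loop
                 * terminates there.
                 */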
                do {
                        rstart = (rstart + amask) & ~amask;
                        if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
                                rstart += bound - (rstart & ~bmask);
                } while ((rstart & amask) != 0 && rstart < end &&
                    rstart < s->r_end);
                rend = ulmin(s->r_end, ulmax(rstart + count, end));
                if (rstart > rend) {
                        DPRINTF(("adjusted start exceeds end\n"));
                        continue;
                }
                DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
                       rstart, rend, (rend - rstart + 1), count));

                if ((rend - rstart + 1) >= count) {
                        DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
                               rstart, rend, (rend - rstart + 1)));
                        if ((s->r_end - s->r_start + 1) == count) {
                                DPRINTF(("candidate region is entire chunk\n"));
                                rv = s;
                                rv->r_flags |= RF_ALLOCATED | flags;
                                rv->r_dev = dev;
                                goto out;
                        }

                        /*
                         * If s->r_start < rstart and
                         *    s->r_end > rstart + count - 1, then
                         * we need to split the region into three pieces
                         * (the middle one will get returned to the user).
                         * Otherwise, we are allocating at either the
                         * beginning or the end of s, so we only need to
                         * split it in two.  The first case requires
                         * two new allocations; the second requires but one.
                         */
                        rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
                        if (rv == NULL)
                                goto out;
                        rv->r_start = rstart;
                        rv->r_end = rstart + count - 1;
                        rv->r_flags = flags | RF_ALLOCATED;
                        rv->r_dev = dev;
                        rv->r_rm = rm;

                        if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
                                DPRINTF(("splitting region in three parts: "
                                       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
                                       s->r_start, rv->r_start - 1,
                                       rv->r_start, rv->r_end,
                                       rv->r_end + 1, s->r_end));
                                /*
                                 * We are allocating in the middle.
                                 */
                                r = malloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
                                if (r == NULL) {
                                        free(rv, M_RMAN);
                                        rv = NULL;
                                        goto out;
                                }
                                r->r_start = rv->r_end + 1;
                                r->r_end = s->r_end;
                                r->r_flags = s->r_flags;
                                r->r_rm = rm;
                                s->r_end = rv->r_start - 1;
                                TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
                                                     r_link);
                                TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
                                                     r_link);
                        } else if (s->r_start == rv->r_start) {
                                DPRINTF(("allocating from the beginning\n"));
                                /*
                                 * We are allocating at the beginning.
                                 */
                                s->r_start = rv->r_end + 1;
                                TAILQ_INSERT_BEFORE(s, rv, r_link);
                        } else {
                                DPRINTF(("allocating at the end\n"));
                                /*
                                 * We are allocating at the end.
                                 */
                                s->r_end = rv->r_start - 1;
                                TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
                                                     r_link);
                        }
                        goto out;
                }
        }

        /*
         * Now find an acceptable shared region, if the client's requirements
         * allow sharing.  By our implementation restriction, a candidate
         * region must match exactly by both size and sharing type in order
         * to be considered compatible with the client's request.  (The
         * former restriction could probably be lifted without too much
         * additional work, but this does not seem warranted.)
         */
        DPRINTF(("no unshared regions found\n"));
        if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
                goto out;

        for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
                if (s->r_start > end)
                        break;
                if ((s->r_flags & flags) != flags)
                        continue;
                rstart = ulmax(s->r_start, start);
                rend = ulmin(s->r_end, ulmax(start + count, end));
                if (s->r_start >= start && s->r_end <= end
                    && (s->r_end - s->r_start + 1) == count &&
                    (s->r_start & amask) == 0 &&
                    ((s->r_start ^ s->r_end) & bmask) == 0) {
                        rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
                        if (rv == NULL)
                                goto out;
                        rv->r_start = s->r_start;
                        rv->r_end = s->r_end;
                        rv->r_flags = s->r_flags &
                                (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
                        rv->r_dev = dev;
                        rv->r_rm = rm;
                        if (s->r_sharehead == NULL) {
                                s->r_sharehead = malloc(sizeof *s->r_sharehead,
                                                M_RMAN, M_NOWAIT | M_ZERO);
                                if (s->r_sharehead == NULL) {
                                        free(rv, M_RMAN);
                                        rv = NULL;
                                        goto out;
                                }
                                LIST_INIT(s->r_sharehead);
                                LIST_INSERT_HEAD(s->r_sharehead, s,
                                                 r_sharelink);
                                s->r_flags |= RF_FIRSTSHARE;
                        }
                        rv->r_sharehead = s->r_sharehead;
                        LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
                        goto out;
                }
        }

        /*
         * We couldn't find anything.
         */
out:
        /*
         * If the user specified RF_ACTIVE in the initial flags,
         * which is reflected in `want_activate', we attempt to atomically
         * activate the resource.  If this fails, we release the resource
         * and indicate overall failure.  (This behavior probably doesn't
         * make sense for RF_TIMESHARE-type resources.)
         */
        if (rv && want_activate) {
                struct resource *whohas;
                if (int_rman_activate_resource(rm, rv, &whohas)) {
                        int_rman_release_resource(rm, rv);
                        rv = NULL;
                }
        }

        mtx_unlock(rm->rm_mtx);
        return (rv);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
                      u_int flags, struct device *dev)
{

        return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
            dev));
}
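
/*
 * Editorial sketch, not part of the original file: reserving a 4 KB,
 * 4 KB-aligned block anywhere inside the windows managed above, and
 * activating it in the same call via RF_ACTIVE.  The alignment is
 * carried in the flags word, encoded by rman_make_alignment_flags().
 * foo_mem_rman and foo_dev are the hypothetical names from the earlier
 * sketches.
 */
#if 0
        struct resource *res;

        res = rman_reserve_resource(&foo_mem_rman, 0, ~0ul, 0x1000,
            RF_ACTIVE | rman_make_alignment_flags(0x1000), foo_dev);
        if (res == NULL)
                return (ENXIO);
#endif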

static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
                           struct resource **whohas)
{
        struct resource *s;
        int ok;

        /*
         * If we are not timesharing, then there is nothing much to do.
         * If we already have the resource, then there is nothing at all to do.
         * If we are not on a sharing list with anybody else, then there is
         * little to do.
         */
        if ((r->r_flags & RF_TIMESHARE) == 0
            || (r->r_flags & RF_ACTIVE) != 0
            || r->r_sharehead == NULL) {
                r->r_flags |= RF_ACTIVE;
                return 0;
        }

        ok = 1;
        for (s = LIST_FIRST(r->r_sharehead); s && ok;
             s = LIST_NEXT(s, r_sharelink)) {
                if ((s->r_flags & RF_ACTIVE) != 0) {
                        ok = 0;
                        *whohas = s;
                }
        }
        if (ok) {
                r->r_flags |= RF_ACTIVE;
                return 0;
        }
        return EBUSY;
}

int
rman_activate_resource(struct resource *r)
{
        int rv;
        struct resource *whohas;
        struct rman *rm;

        rm = r->r_rm;
        mtx_lock(rm->rm_mtx);
        rv = int_rman_activate_resource(rm, r, &whohas);
        mtx_unlock(rm->rm_mtx);
        return rv;
}

int
rman_await_resource(struct resource *r, int pri, int timo)
{
        int     rv;
        struct  resource *whohas;
        struct  rman *rm;

        rm = r->r_rm;
        mtx_lock(rm->rm_mtx);
        for (;;) {
                rv = int_rman_activate_resource(rm, r, &whohas);
                if (rv != EBUSY)
                        return (rv);    /* returns with mutex held */

                if (r->r_sharehead == NULL)
                        panic("rman_await_resource");
                whohas->r_flags |= RF_WANTED;
                rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
                if (rv) {
                        mtx_unlock(rm->rm_mtx);
                        return (rv);
                }
        }
}
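
/*
 * Editorial sketch, not part of the original file: a driver sharing an
 * RF_TIMESHARE resource "res" might block until no other sharer has it
 * active.  PZERO and the one-second timeout are illustrative choices,
 * not mandated.  Note that on success this function returns with the
 * rman mutex still held, so the caller must drop it when done.
 */
#if 0
        error = rman_await_resource(res, PZERO, hz);
        if (error != 0)
                return (error);         /* e.g. EWOULDBLOCK on timeout */
        /* ... use the resource ... */
        mtx_unlock(res->r_rm->rm_mtx);
#endif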

static int
int_rman_deactivate_resource(struct resource *r)
{
        struct  rman *rm;

        rm = r->r_rm;
        r->r_flags &= ~RF_ACTIVE;
        if (r->r_flags & RF_WANTED) {
                r->r_flags &= ~RF_WANTED;
                wakeup(r->r_sharehead);
        }
        return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
        struct  rman *rm;

        rm = r->r_rm;
        mtx_lock(rm->rm_mtx);
        int_rman_deactivate_resource(r);
        mtx_unlock(rm->rm_mtx);
        return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
        struct  resource *s, *t;

        if (r->r_flags & RF_ACTIVE)
                int_rman_deactivate_resource(r);

        /*
         * Check for a sharing list first.  If there is one, then we don't
         * have to think as hard.
         */
        if (r->r_sharehead) {
                /*
                 * If a sharing list exists, then we know there are at
                 * least two sharers.
                 *
                 * If we are in the main resource list, appoint another
                 * sharer to take our place.
                 */
                LIST_REMOVE(r, r_sharelink);
                s = LIST_FIRST(r->r_sharehead);
                if (r->r_flags & RF_FIRSTSHARE) {
                        s->r_flags |= RF_FIRSTSHARE;
                        TAILQ_INSERT_BEFORE(r, s, r_link);
                        TAILQ_REMOVE(&rm->rm_list, r, r_link);
                }

                /*
                 * Make sure that the sharing list goes away completely
                 * if the resource is no longer being shared at all.
                 */
                if (LIST_NEXT(s, r_sharelink) == NULL) {
                        free(s->r_sharehead, M_RMAN);
                        s->r_sharehead = NULL;
                        s->r_flags &= ~RF_FIRSTSHARE;
                }
                goto out;
        }

        /*
         * Look at the adjacent resources in the list and see if our
         * segment can be merged with any of them.
         */
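        /*
         * Editorial note, pictorially, with '*' marking allocated
         * segments:
         *
         *      before:  [ s ][ r* ][ t ]       (s and t both free)
         *      after:   [       s      ]       (r and t freed)
         *
         * The two-way merges below are one-sided versions of the same
         * picture; if neither neighbor is free, r is simply marked
         * unallocated in place.
         */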
        s = TAILQ_PREV(r, resource_head, r_link);
        t = TAILQ_NEXT(r, r_link);

        if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0
            && t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
                /*
                 * Merge all three segments.
                 */
                s->r_end = t->r_end;
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
                TAILQ_REMOVE(&rm->rm_list, t, r_link);
                free(t, M_RMAN);
        } else if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0) {
                /*
                 * Merge previous segment with ours.
                 */
                s->r_end = r->r_end;
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
        } else if (t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
                /*
                 * Merge next segment with ours.
                 */
                t->r_start = r->r_start;
                TAILQ_REMOVE(&rm->rm_list, r, r_link);
        } else {
                /*
                 * At this point, we know there is nothing we
                 * can potentially merge with, because on each
                 * side, there is either nothing there or what is
                 * there is still allocated.  In that case, we don't
                 * want to remove r from the list; we simply want to
                 * change it to an unallocated region and return
                 * without freeing anything.
                 */
                r->r_flags &= ~RF_ALLOCATED;
                return 0;
        }

out:
        free(r, M_RMAN);
        return 0;
}

int
rman_release_resource(struct resource *r)
{
        int     rv;
        struct  rman *rm = r->r_rm;

        mtx_lock(rm->rm_mtx);
        rv = int_rman_release_resource(rm, r);
        mtx_unlock(rm->rm_mtx);
        return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
        int     i;

        /*
         * Find the highest bit set, and add one if more than one bit
         * is set.  We're effectively computing ceil(log2(size)) here.
         */
        for (i = 31; i > 0; i--)
                if ((1 << i) & size)
                        break;
        if (~(1 << i) & size)
                i++;

        return (RF_ALIGNMENT_LOG2(i));
}
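
/*
 * Editorial note, two worked examples: size 0x1000 has only bit 12 set,
 * so the loop stops at i = 12 and the result encodes a 2^12 (4 KB)
 * alignment.  Size 0x1800 has bits 12 and 11 set; the round-up test
 * bumps i to 13, encoding a 2^13 alignment.
 */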

u_long
rman_get_start(struct resource *r)
{
        return (r->r_start);
}

u_long
rman_get_end(struct resource *r)
{
        return (r->r_end);
}

u_long
rman_get_size(struct resource *r)
{
        return (r->r_end - r->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
        return (r->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
        r->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
        return (r->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
        r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
        return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
        r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
        return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
        r->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{
        return (r->r_rid);
}

struct device *
rman_get_device(struct resource *r)
{
        return (r->r_dev);
}
