FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_rman.c
1 /*-
2 * Copyright 1998 Massachusetts Institute of Technology
3 *
4 * Permission to use, copy, modify, and distribute this software and
5 * its documentation for any purpose and without fee is hereby
6 * granted, provided that both the above copyright notice and this
7 * permission notice appear in all copies, that both the above
8 * copyright notice and this permission notice appear in all
9 * supporting documentation, and that the name of M.I.T. not be used
10 * in advertising or publicity pertaining to distribution of the
11 * software without specific, written prior permission. M.I.T. makes
12 * no representations about the suitability of this software for any
13 * purpose. It is provided "as is" without express or implied
14 * warranty.
15 *
16 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
17 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
18 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
20 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /*
31 * The kernel resource manager. This code is responsible for keeping track
32 * of hardware resources which are apportioned out to various drivers.
33 * It does not actually assign those resources, and it is not expected
34 * that end-device drivers will call into this code directly. Rather,
35 * the code which implements the buses that those devices are attached to,
36 * and the code which manages CPU resources, will call this code, and the
37 * end-device drivers will make upcalls to that code to actually perform
38 * the allocation.
39 *
40 * There are two sorts of resources managed by this code. The first is
41 * the more familiar array (RMAN_ARRAY) type; resources in this class
42 * consist of a sequence of individually-allocatable objects which have
43 * been numbered in some well-defined order. Most of the resources
44 * are of this type, as it is the most familiar. The second type is
45 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
46 * resources in which each instance is indistinguishable from every
47 * other instance). The principal anticipated application of gauges
48 * is in the context of power consumption, where a bus may have a specific
49 * power budget which all attached devices share. RMAN_GAUGE is not
50 * implemented yet.
51 *
52 * For array resources, we make one simplifying assumption: two clients
53 * sharing the same resource must use the same range of indices. That
54 * is to say, sharing of overlapping-but-not-identical regions is not
55 * permitted.
56 */
57
58 #include <sys/cdefs.h>
59 __FBSDID("$FreeBSD: releng/6.4/sys/kern/subr_rman.c 173288 2007-11-02 19:00:06Z jhb $");
60
61 #define __RMAN_RESOURCE_VISIBLE
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/limits.h>
66 #include <sys/lock.h>
67 #include <sys/malloc.h>
68 #include <sys/mutex.h>
69 #include <sys/bus.h> /* XXX debugging */
70 #include <machine/bus.h>
71 #include <sys/rman.h>
72 #include <sys/sysctl.h>
73
74 int rman_debug = 0;
75 TUNABLE_INT("debug.rman_debug", &rman_debug);
76 SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
77 &rman_debug, 0, "rman debug");
78
79 #define DPRINTF(params) if (rman_debug) printf params
80
81 static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");
82
83 struct rman_head rman_head;
84 static struct mtx rman_mtx; /* mutex to protect rman_head */
85 static int int_rman_activate_resource(struct rman *rm, struct resource *r,
86 struct resource **whohas);
87 static int int_rman_deactivate_resource(struct resource *r);
88 static int int_rman_release_resource(struct rman *rm, struct resource *r);
89
90 int
91 rman_init(struct rman *rm)
92 {
93 static int once = 0;
94
95 if (once == 0) {
96 once = 1;
97 TAILQ_INIT(&rman_head);
98 mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
99 }
100
101 if (rm->rm_type == RMAN_UNINIT)
102 panic("rman_init");
103 if (rm->rm_type == RMAN_GAUGE)
104 panic("implement RMAN_GAUGE");
105
106 TAILQ_INIT(&rm->rm_list);
107 rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
108 if (rm->rm_mtx == NULL)
109 return ENOMEM;
110 mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);
111
112 mtx_lock(&rman_mtx);
113 TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
114 mtx_unlock(&rman_mtx);
115 return 0;
116 }
117
118 /*
119 * NB: this interface is not robust against programming errors which
120 * add multiple copies of the same region.
121 */
122 int
123 rman_manage_region(struct rman *rm, u_long start, u_long end)
124 {
125 struct resource *r, *s, *t;
126
127 DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
128 rm->rm_descr, start, end));
129 r = malloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
130 if (r == NULL)
131 return ENOMEM;
132 r->r_start = start;
133 r->r_end = end;
134 r->r_rm = rm;
135
136 mtx_lock(rm->rm_mtx);
137
138 /* Skip entries before us. */
139 TAILQ_FOREACH(s, &rm->rm_list, r_link) {
140 if (s->r_end == ULONG_MAX)
141 break;
142 if (s->r_end + 1 >= r->r_start)
143 break;
144 }
145
146 /* If we ran off the end of the list, insert at the tail. */
147 if (s == NULL) {
148 TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
149 } else {
150 /* Check for any overlap with the current region. */
151 if (r->r_start <= s->r_end && r->r_end >= s->r_start)
152 return EBUSY;
153
154 /* Check for any overlap with the next region. */
155 t = TAILQ_NEXT(s, r_link);
156 if (t && r->r_start <= t->r_end && r->r_end >= t->r_start)
157 return EBUSY;
158
159 /*
160 * See if this region can be merged with the next region. If
161 * not, clear the pointer.
162 */
163 if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
164 t = NULL;
165
166 /* See if we can merge with the current region. */
167 if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
168 /* Can we merge all 3 regions? */
169 if (t != NULL) {
170 s->r_end = t->r_end;
171 TAILQ_REMOVE(&rm->rm_list, t, r_link);
172 free(r, M_RMAN);
173 free(t, M_RMAN);
174 } else {
175 s->r_end = r->r_end;
176 free(r, M_RMAN);
177 }
178 } else if (t != NULL) {
179 /* Can we merge with just the next region? */
180 t->r_start = r->r_start;
181 free(r, M_RMAN);
182 } else if (s->r_end < r->r_start) {
183 TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
184 } else {
185 TAILQ_INSERT_BEFORE(s, r, r_link);
186 }
187 }
188
189 mtx_unlock(rm->rm_mtx);
190 return 0;
191 }
192
193 int
194 rman_fini(struct rman *rm)
195 {
196 struct resource *r;
197
198 mtx_lock(rm->rm_mtx);
199 TAILQ_FOREACH(r, &rm->rm_list, r_link) {
200 if (r->r_flags & RF_ALLOCATED) {
201 mtx_unlock(rm->rm_mtx);
202 return EBUSY;
203 }
204 }
205
206 /*
207 * There really should only be one of these if we are in this
208 * state and the code is working properly, but it can't hurt.
209 */
210 while (!TAILQ_EMPTY(&rm->rm_list)) {
211 r = TAILQ_FIRST(&rm->rm_list);
212 TAILQ_REMOVE(&rm->rm_list, r, r_link);
213 free(r, M_RMAN);
214 }
215 mtx_unlock(rm->rm_mtx);
216 mtx_lock(&rman_mtx);
217 TAILQ_REMOVE(&rman_head, rm, rm_link);
218 mtx_unlock(&rman_mtx);
219 mtx_destroy(rm->rm_mtx);
220 free(rm->rm_mtx, M_RMAN);
221
222 return 0;
223 }
224
/*
 * Allocate a `count'-unit range from `rm' constrained to [start, end],
 * honouring the alignment encoded in `flags' (RF_ALIGNMENT) and an
 * optional power-of-two boundary `bound' (0 = no boundary constraint).
 * An exclusive allocation is attempted first; if that fails and the
 * caller allowed sharing (RF_SHAREABLE/RF_TIMESHARE), an exact-match
 * shared region is searched for.  Returns the resource with
 * RF_ALLOCATED set, or NULL on failure.  If RF_ACTIVE was requested,
 * the resource is also activated before return and the reservation is
 * undone if activation fails.
 */
struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	       "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count,
	       flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
	/* Activation is handled separately at `out', under the same lock. */
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	/* Skip segments that end before the requested window begins. */
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	/* amask: low bits that must be clear in an aligned start address. */
	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		/* The list is sorted; nothing past `end' can ever fit. */
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1> end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			/* Does [rstart, rstart+count-1] cross a boundary? */
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			/* Exact fit: hand out the existing segment itself. */
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = malloc(sizeof *r, M_RMAN, M_NOWAIT|M_ZERO);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				/* `r' becomes the free tail remainder of s. */
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		/* Existing region's sharing flags must cover ours exactly. */
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			/* Lazily create the share list on the first sharer. */
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv);
}
438
439 struct resource *
440 rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
441 u_int flags, struct device *dev)
442 {
443
444 return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
445 dev));
446 }
447
448 static int
449 int_rman_activate_resource(struct rman *rm, struct resource *r,
450 struct resource **whohas)
451 {
452 struct resource *s;
453 int ok;
454
455 /*
456 * If we are not timesharing, then there is nothing much to do.
457 * If we already have the resource, then there is nothing at all to do.
458 * If we are not on a sharing list with anybody else, then there is
459 * little to do.
460 */
461 if ((r->r_flags & RF_TIMESHARE) == 0
462 || (r->r_flags & RF_ACTIVE) != 0
463 || r->r_sharehead == NULL) {
464 r->r_flags |= RF_ACTIVE;
465 return 0;
466 }
467
468 ok = 1;
469 for (s = LIST_FIRST(r->r_sharehead); s && ok;
470 s = LIST_NEXT(s, r_sharelink)) {
471 if ((s->r_flags & RF_ACTIVE) != 0) {
472 ok = 0;
473 *whohas = s;
474 }
475 }
476 if (ok) {
477 r->r_flags |= RF_ACTIVE;
478 return 0;
479 }
480 return EBUSY;
481 }
482
483 int
484 rman_activate_resource(struct resource *r)
485 {
486 int rv;
487 struct resource *whohas;
488 struct rman *rm;
489
490 rm = r->r_rm;
491 mtx_lock(rm->rm_mtx);
492 rv = int_rman_activate_resource(rm, r, &whohas);
493 mtx_unlock(rm->rm_mtx);
494 return rv;
495 }
496
/*
 * Sleep until the time-shared resource `r' can be activated.
 *
 * NOTE: on any non-EBUSY result from the activation attempt this
 * returns with rm->rm_mtx HELD -- the caller must release it.  Only
 * on an msleep() failure (timeout or interrupted sleep) is the mutex
 * dropped before returning the msleep error.
 */
int
rman_await_resource(struct resource *r, int pri, int timo)
{
	int	rv;
	struct	resource *whohas;
	struct	rman *rm;

	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		/* EBUSY implies a conflicting sharer exists on the list. */
		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		/* Ask the current holder to wake us on deactivation. */
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}
521
522 static int
523 int_rman_deactivate_resource(struct resource *r)
524 {
525
526 r->r_flags &= ~RF_ACTIVE;
527 if (r->r_flags & RF_WANTED) {
528 r->r_flags &= ~RF_WANTED;
529 wakeup(r->r_sharehead);
530 }
531 return 0;
532 }
533
534 int
535 rman_deactivate_resource(struct resource *r)
536 {
537 struct rman *rm;
538
539 rm = r->r_rm;
540 mtx_lock(rm->rm_mtx);
541 int_rman_deactivate_resource(r);
542 mtx_unlock(rm->rm_mtx);
543 return 0;
544 }
545
/*
 * Return `r' to its manager's pool, deactivating it first if it is
 * still active.  A shared resource is simply dropped from its share
 * list; an exclusively held segment is merged back into any adjacent
 * free neighbours or, failing that, just marked unallocated.  Called
 * with rm->rm_mtx held; always returns 0.
 */
static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct	resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main circleq, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			/* Hand rm_list membership over to sharer `s'. */
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
638
639 int
640 rman_release_resource(struct resource *r)
641 {
642 int rv;
643 struct rman *rm = r->r_rm;
644
645 mtx_lock(rm->rm_mtx);
646 rv = int_rman_release_resource(rm, r);
647 mtx_unlock(rm->rm_mtx);
648 return (rv);
649 }
650
/*
 * Convert an alignment in bytes into the RF_ALIGNMENT flag encoding.
 * We're effectively computing ceil(log2(size)): find the highest bit
 * set, and add one if `size' is not an exact power of two.
 */
uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Use unsigned shifts throughout: the original `1 << i' is
	 * undefined behavior in C when i == 31 (left shift of 1 into
	 * the sign bit of a signed int).
	 */
	for (i = 31; i > 0; i--)
		if ((1u << i) & size)
			break;
	if (~(1u << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
668
669 u_long
670 rman_get_start(struct resource *r)
671 {
672 return (r->r_start);
673 }
674
675 u_long
676 rman_get_end(struct resource *r)
677 {
678 return (r->r_end);
679 }
680
681 u_long
682 rman_get_size(struct resource *r)
683 {
684 return (r->r_end - r->r_start + 1);
685 }
686
687 u_int
688 rman_get_flags(struct resource *r)
689 {
690 return (r->r_flags);
691 }
692
693 void
694 rman_set_virtual(struct resource *r, void *v)
695 {
696 r->r_virtual = v;
697 }
698
699 void *
700 rman_get_virtual(struct resource *r)
701 {
702 return (r->r_virtual);
703 }
704
705 void
706 rman_set_bustag(struct resource *r, bus_space_tag_t t)
707 {
708 r->r_bustag = t;
709 }
710
711 bus_space_tag_t
712 rman_get_bustag(struct resource *r)
713 {
714 return (r->r_bustag);
715 }
716
717 void
718 rman_set_bushandle(struct resource *r, bus_space_handle_t h)
719 {
720 r->r_bushandle = h;
721 }
722
723 bus_space_handle_t
724 rman_get_bushandle(struct resource *r)
725 {
726 return (r->r_bushandle);
727 }
728
729 void
730 rman_set_rid(struct resource *r, int rid)
731 {
732 r->r_rid = rid;
733 }
734
735 void
736 rman_set_start(struct resource *r, u_long start)
737 {
738 r->r_start = start;
739 }
740
741 void
742 rman_set_end(struct resource *r, u_long end)
743 {
744 r->r_end = end;
745 }
746
747 int
748 rman_get_rid(struct resource *r)
749 {
750 return (r->r_rid);
751 }
752
753 struct device *
754 rman_get_device(struct resource *r)
755 {
756 return (r->r_dev);
757 }
758
759 void
760 rman_set_device(struct resource *r, struct device *dev)
761 {
762 r->r_dev = dev;
763 }
764
765 /*
766 * Sysctl interface for scanning the resource lists.
767 *
768 * We take two input parameters; the index into the list of resource
769 * managers, and the resource offset into the list.
770 */
771 static int
772 sysctl_rman(SYSCTL_HANDLER_ARGS)
773 {
774 int *name = (int *)arg1;
775 u_int namelen = arg2;
776 int rman_idx, res_idx;
777 struct rman *rm;
778 struct resource *res;
779 struct u_rman urm;
780 struct u_resource ures;
781 int error;
782
783 if (namelen != 3)
784 return (EINVAL);
785
786 if (bus_data_generation_check(name[0]))
787 return (EINVAL);
788 rman_idx = name[1];
789 res_idx = name[2];
790
791 /*
792 * Find the indexed resource manager
793 */
794 mtx_lock(&rman_mtx);
795 TAILQ_FOREACH(rm, &rman_head, rm_link) {
796 if (rman_idx-- == 0)
797 break;
798 }
799 mtx_unlock(&rman_mtx);
800 if (rm == NULL)
801 return (ENOENT);
802
803 /*
804 * If the resource index is -1, we want details on the
805 * resource manager.
806 */
807 if (res_idx == -1) {
808 bzero(&urm, sizeof(urm));
809 urm.rm_handle = (uintptr_t)rm;
810 strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
811 urm.rm_start = rm->rm_start;
812 urm.rm_size = rm->rm_end - rm->rm_start + 1;
813 urm.rm_type = rm->rm_type;
814
815 error = SYSCTL_OUT(req, &urm, sizeof(urm));
816 return (error);
817 }
818
819 /*
820 * Find the indexed resource and return it.
821 */
822 mtx_lock(&rman_mtx);
823 TAILQ_FOREACH(res, &rm->rm_list, r_link) {
824 if (res_idx-- == 0) {
825 bzero(&ures, sizeof(ures));
826 ures.r_handle = (uintptr_t)res;
827 ures.r_parent = (uintptr_t)res->r_rm;
828 ures.r_device = (uintptr_t)res->r_dev;
829 if (res->r_dev != NULL) {
830 if (device_get_name(res->r_dev) != NULL) {
831 snprintf(ures.r_devname, RM_TEXTLEN,
832 "%s%d",
833 device_get_name(res->r_dev),
834 device_get_unit(res->r_dev));
835 } else {
836 strlcpy(ures.r_devname, "nomatch",
837 RM_TEXTLEN);
838 }
839 } else {
840 ures.r_devname[0] = '\0';
841 }
842 ures.r_start = res->r_start;
843 ures.r_size = res->r_end - res->r_start + 1;
844 ures.r_flags = res->r_flags;
845
846 mtx_unlock(&rman_mtx);
847 error = SYSCTL_OUT(req, &ures, sizeof(ures));
848 return (error);
849 }
850 }
851 mtx_unlock(&rman_mtx);
852 return (ENOENT);
853 }
854
855 SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
856 "kernel resource manager");
Cache object: 965da99c182a1686520b358e2dd4677a
|