sys/kern/subr_rman.c
/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
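
/*
 * Illustrative usage sketch (not part of the original file; the rman,
 * region values, and child_dev below are invented for the example).
 * A hypothetical bus driver would typically use this interface roughly
 * as follows:
 *
 *	static struct rman irq_rman;
 *
 *	irq_rman.rm_type = RMAN_ARRAY;
 *	irq_rman.rm_descr = "Interrupt request lines";
 *	if (rman_init(&irq_rman) != 0 ||
 *	    rman_manage_region(&irq_rman, 0, 15) != 0)
 *		panic("irq_rman setup failed");
 *
 *	// Later, on behalf of a child device:
 *	struct resource *res = rman_reserve_resource(&irq_rman, 5, 5, 1,
 *	    RF_ACTIVE | RF_SHAREABLE, child_dev);
 *	if (res == NULL)
 *		// handle allocation failure
 */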

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.1/sys/kern/subr_rman.c 158179 2006-04-30 16:44:43Z cvs2svn $");

#define __RMAN_RESOURCE_VISIBLE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

int rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

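/*
 * Call sites use double parentheses, as in DPRINTF(("x %d\n", x)), so that
 * this one-parameter macro can forward an entire printf-style argument list.
 */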
#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx;	/* mutex to protect rman_head */
static int int_rman_activate_resource(struct rman *rm, struct resource *r,
    struct resource **whohas);
static int int_rman_deactivate_resource(struct resource *r);
static int int_rman_release_resource(struct rman *rm, struct resource *r);

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource *r, *s;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = malloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);
	for (s = TAILQ_FIRST(&rm->rm_list);
	     s && s->r_end < r->r_start;
	     s = TAILQ_NEXT(s, r_link))
		;

	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		TAILQ_INSERT_BEFORE(s, r, r_link);
	}

	mtx_unlock(rm->rm_mtx);
	return 0;
}
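
/*
 * For instance (values illustrative only), an ISA bus driver might hand an
 * I/O-port rman two disjoint windows:
 *
 *	rman_manage_region(&ioport_rman, 0x0000, 0x02ff);
 *	rman_manage_region(&ioport_rman, 0x0400, 0xffff);
 *
 * Per the NB above, nothing here detects a range that is added twice, so
 * callers must keep the managed regions disjoint themselves.
 */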

int
rman_fini(struct rman *rm)
{
	struct resource *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
			    u_long count, u_long bound, u_int flags,
			    struct device *dev)
{
	u_int want_activate;
	struct resource *r, *s, *rv;
	u_long rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	    "%#lx, flags %u, device %s\n", rm->rm_descr, start, end, count,
	    flags, dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/*
	 * If bound is 0, then (bound - 1) wraps around to ~0ul and bmask
	 * becomes 0, so the boundary checks below turn into no-ops.
	 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
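		/*
		 * Worked example (illustrative values): with 2^12 alignment
		 * (amask = 0xfff), bound = 0x10000 (bmask = ~0xffff),
		 * count = 0x2000 and rstart = 0xf000, the candidate
		 * [0xf000, 0x10fff] crosses a boundary, so rstart is advanced
		 * by 0x10000 - 0xf000 to 0x10000; [0x10000, 0x11fff] is both
		 * aligned and boundary-clean, and the loop terminates.
		 */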
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = malloc(sizeof *r, M_RMAN, M_NOWAIT|M_ZERO);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
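	/*
	 * For example (hypothetical values): if one driver holds IRQ 10 as a
	 * shareable resource, a second driver's request must also be exactly
	 * [10, 10] with the same RF_SHAREABLE/RF_TIMESHARE type; a request
	 * for [10, 11] would not match even though it overlaps.
	 */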
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
			    (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}

static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
			   struct resource **whohas)
{
	struct resource *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *r)
{
	int rv;
	struct resource *whohas;
	struct rman *rm;

	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *r, int pri, int timo)
{
	int rv;
	struct resource *whohas;
	struct rman *rm;

	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
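	/*
	 * For example (illustrative values): releasing an allocated segment
	 * [0x200, 0x2ff] that sits between free segments [0x100, 0x1ff] and
	 * [0x300, 0x3ff] collapses all three into a single free segment
	 * [0x100, 0x3ff].
	 */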
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *r)
{
	int rv;
	struct rman *rm = r->r_rm;

	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * is set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}
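
/*
 * For instance, rman_make_alignment_flags(0x1000) yields
 * RF_ALIGNMENT_LOG2(12), while 0x1800 (more than one bit set) rounds up
 * to RF_ALIGNMENT_LOG2(13); the result is OR'd into the flags consumed
 * by rman_reserve_resource_bound() via RF_ALIGNMENT().
 */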

u_long
rman_get_start(struct resource *r)
{
	return (r->r_start);
}

u_long
rman_get_end(struct resource *r)
{
	return (r->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->r_end - r->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->r_rid = rid;
}

void
rman_set_start(struct resource *r, u_long start)
{
	r->r_start = start;
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->r_end = end;
}

int
rman_get_rid(struct resource *r)
{
	return (r->r_rid);
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->r_dev);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->r_dev = dev;
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: a bus generation count (used to
 * reject stale requests), the index into the list of resource
 * managers, and the resource index within the selected manager's list
 * (or -1 to ask about the manager itself).
 */
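/*
 * For example, supplying the three name components
 * { <current bus generation>, 0, -1 } returns a struct u_rman describing
 * the first resource manager, while { <generation>, 0, 2 } returns a
 * struct u_resource for the third region in that manager's list.
 */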
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	int rman_idx, res_idx;
	struct rman *rm;
	struct resource *res;
	struct u_rman urm;
	struct u_resource ures;
	int error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager.
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			mtx_unlock(&rman_mtx);
			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	mtx_unlock(&rman_mtx);
	return (ENOENT);
}

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");