sys/dev/bhnd/siba/siba.c
/*-
 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/refcount.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/bhnd/cores/chipc/chipc.h>
#include <dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.h>

#include "siba_eromvar.h"

#include "sibareg.h"
#include "sibavar.h"

/* RID used when allocating EROM resources */
#define	SIBA_EROM_RID	0

static bhnd_erom_class_t *
siba_get_erom_class(driver_t *driver)
{
	return (&siba_erom_parser);
}

int
siba_probe(device_t dev)
{
	device_set_desc(dev, "SIBA BHND bus");
	return (BUS_PROBE_DEFAULT);
}

/**
 * Default siba(4) bus driver implementation of DEVICE_ATTACH().
 *
 * This implementation initializes internal siba(4) state and performs
 * bus enumeration, and must be called by subclassing drivers in
 * DEVICE_ATTACH() before any other bus methods.
 */
int
siba_attach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	SIBA_LOCK_INIT(sc);

	/* Enumerate children */
	if ((error = siba_add_children(dev))) {
		device_delete_children(dev);
		SIBA_LOCK_DESTROY(sc);
		return (error);
	}

	return (0);
}
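
/*
 * Example (illustrative sketch; the "mybus" driver below is hypothetical
 * and not part of this file): a subclassing bus driver is expected to
 * call siba_attach() first in its own DEVICE_ATTACH(), performing any
 * bridge-specific setup only after enumeration has completed:
 *
 *	static int
 *	mybus_attach(device_t dev)
 *	{
 *		int error;
 *
 *		if ((error = siba_attach(dev)))
 *			return (error);
 *
 *		... bridge-specific setup ...
 *
 *		return (bus_generic_attach(dev));
 *	}
 */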

int
siba_detach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	if ((error = bhnd_generic_detach(dev)))
		return (error);

	SIBA_LOCK_DESTROY(sc);

	return (0);
}

int
siba_resume(device_t dev)
{
	return (bhnd_generic_resume(dev));
}

int
siba_suspend(device_t dev)
{
	return (bhnd_generic_suspend(dev));
}

static int
siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct siba_softc		*sc;
	const struct siba_devinfo	*dinfo;
	const struct bhnd_core_info	*cfg;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	cfg = &dinfo->core_id.core_info;

	switch (index) {
	case BHND_IVAR_VENDOR:
		*result = cfg->vendor;
		return (0);
	case BHND_IVAR_DEVICE:
		*result = cfg->device;
		return (0);
	case BHND_IVAR_HWREV:
		*result = cfg->hwrev;
		return (0);
	case BHND_IVAR_DEVICE_CLASS:
		*result = bhnd_core_class(cfg);
		return (0);
	case BHND_IVAR_VENDOR_NAME:
		*result = (uintptr_t) bhnd_vendor_name(cfg->vendor);
		return (0);
	case BHND_IVAR_DEVICE_NAME:
		*result = (uintptr_t) bhnd_core_name(cfg);
		return (0);
	case BHND_IVAR_CORE_INDEX:
		*result = cfg->core_idx;
		return (0);
	case BHND_IVAR_CORE_UNIT:
		*result = cfg->unit;
		return (0);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_BHND:
			*result = (uintptr_t)dinfo->pmu.bhnd_info;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

static int
siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	switch (index) {
	case BHND_IVAR_VENDOR:
	case BHND_IVAR_DEVICE:
	case BHND_IVAR_HWREV:
	case BHND_IVAR_DEVICE_CLASS:
	case BHND_IVAR_VENDOR_NAME:
	case BHND_IVAR_DEVICE_NAME:
	case BHND_IVAR_CORE_INDEX:
	case BHND_IVAR_CORE_UNIT:
		return (EINVAL);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
		case SIBA_PMU_BHND:
			dinfo->pmu.bhnd_info = (void *)value;
			dinfo->pmu_state = SIBA_PMU_BHND;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			panic("bhnd_set_pmu_info() called with siba PMU state "
			    "%d", dinfo->pmu_state);
			return (ENXIO);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

static struct resource_list *
siba_get_resource_list(device_t dev, device_t child)
{
	struct siba_devinfo *dinfo = device_get_ivars(child);
	return (&dinfo->resources);
}

/* BHND_BUS_ALLOC_PMU() */
static int
siba_alloc_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 chipc;
	device_t		 pwrctl;
	struct chipc_caps	 ccaps;
	siba_pmu_state		 pmu_state;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	pwrctl = NULL;

	/* Fetch ChipCommon capability flags */
	chipc = bhnd_retain_provider(child, BHND_SERVICE_CHIPC);
	if (chipc != NULL) {
		ccaps = *BHND_CHIPC_GET_CAPS(chipc);
		bhnd_release_provider(child, chipc, BHND_SERVICE_CHIPC);
	} else {
		memset(&ccaps, 0, sizeof(ccaps));
	}

	/* Defer to bhnd(4)'s PMU implementation if ChipCommon exists and
	 * advertises PMU support */
	if (ccaps.pmu) {
		if ((error = bhnd_generic_alloc_pmu(dev, child)))
			return (error);

		KASSERT(dinfo->pmu_state == SIBA_PMU_BHND,
		    ("unexpected PMU state: %d", dinfo->pmu_state));

		return (0);
	}

	/*
	 * This is either a legacy PWRCTL chipset, or the device does not
	 * support dynamic clock control.
	 *
	 * We need to map all bhnd(4) bus PMU requests onto PWRCTL or no-op
	 * operations.
	 */
	if (ccaps.pwr_ctrl) {
		pmu_state = SIBA_PMU_PWRCTL;
		pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL);
		if (pwrctl == NULL) {
			device_printf(dev, "PWRCTL not found\n");
			return (ENODEV);
		}
	} else {
		pmu_state = SIBA_PMU_FIXED;
		pwrctl = NULL;
	}

	SIBA_LOCK(sc);

	/* Per-core PMU state already allocated? */
	if (dinfo->pmu_state != SIBA_PMU_NONE) {
		panic("duplicate PMU allocation for %s",
		    device_get_nameunit(child));
	}

	/* Update the child's PMU allocation state, and transfer ownership of
	 * the PWRCTL provider reference (if any) */
	dinfo->pmu_state = pmu_state;
	dinfo->pmu.pwrctl = pwrctl;

	SIBA_UNLOCK(sc);

	return (0);
}
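
/*
 * Example (illustrative sketch, not part of this driver): a child core
 * driver consumes this method indirectly through the bhnd(4) inline
 * wrappers; the same calls work whether the request is backed by a PMU,
 * a PWRCTL provider, or fixed-clock no-op state:
 *
 *	if ((error = bhnd_alloc_pmu(dev)))
 *		return (error);
 *
 *	error = bhnd_request_clock(dev, BHND_CLOCK_HT);
 *	...
 *	bhnd_release_pmu(dev);
 */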

/* BHND_BUS_RELEASE_PMU() */
static int
siba_release_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 pwrctl;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("pmu over-release for %s", device_get_nameunit(child));
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_pmu(dev, child));

	case SIBA_PMU_PWRCTL:
		/* Requesting BHND_CLOCK_DYN releases any outstanding clock
		 * reservations */
		pwrctl = dinfo->pmu.pwrctl;
		error = bhnd_pwrctl_request_clock(pwrctl, child,
		    BHND_CLOCK_DYN);
		if (error) {
			SIBA_UNLOCK(sc);
			return (error);
		}

		/* Clean up the child's PMU state */
		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		/* Release the provider reference */
		bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL);
		return (0);

	case SIBA_PMU_FIXED:
		/* Clean up the child's PMU state */
		KASSERT(dinfo->pmu.pwrctl == NULL,
		    ("PWRCTL reference with FIXED state"));

		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_GET_CLOCK_LATENCY() */
static int
siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock,
    u_int *latency)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_latency(dev, child, clock,
		    latency));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock,
		    latency);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and incurs no transition
		 * delay. */
		switch (clock) {
		case BHND_CLOCK_HT:
			*latency = 0;
			return (0);

		default:
			return (ENODEV);
		}

		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_GET_CLOCK_FREQ() */
static int
siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock,
    u_int *freq)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_freq(dev, child, clock, freq));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock,
		    freq);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_EXT_RSRC() */
static int
siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_RELEASE_EXT_RSRC() */
static int
siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_CLOCK() */
static int
siba_request_clock(device_t dev, device_t child, bhnd_clock clock)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_clock(dev, child, clock));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    clock);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and fulfills any of the
		 * following clock requests */
		switch (clock) {
		case BHND_CLOCK_DYN:
		case BHND_CLOCK_ILP:
		case BHND_CLOCK_ALP:
		case BHND_CLOCK_HT:
			return (0);

		default:
			return (ENODEV);
		}
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_ENABLE_CLOCKS() */
static int
siba_enable_clocks(device_t dev, device_t child, uint32_t clocks)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_enable_clocks(dev, child, clocks));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* All (supported) clocks are already enabled by default */
		clocks &= ~(BHND_CLOCK_DYN |
		    BHND_CLOCK_ILP |
		    BHND_CLOCK_ALP |
		    BHND_CLOCK_HT);

		if (clocks != 0) {
			device_printf(dev, "%s requested unknown clocks: %#x\n",
			    device_get_nameunit(child), clocks);
			return (ENODEV);
		}

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

static int
siba_read_iost(device_t dev, device_t child, uint16_t *iost)
{
	uint32_t	tmhigh;
	int		error;

	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
	if (error)
		return (error);

	*iost = (SIBA_REG_GET(tmhigh, TMH_SISF));
	return (0);
}

static int
siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
	uint32_t	ts_low;
	int		error;

	if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4)))
		return (error);

	*ioctl = (SIBA_REG_GET(ts_low, TML_SICF));
	return (0);
}

static int
siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ts_low, ts_mask;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* Fetch CFG0 mapping */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Mask and set TMSTATELOW core flag bits */
	ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK;
	ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, ts_mask);
	return (0);
}
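
/*
 * Worked example (illustrative; the flag value below is hypothetical,
 * and SIBA_TML_SICF_SHIFT is assumed to be 16): a child calling
 * bhnd_write_ioctl(dev, 0x0004, 0x0004) yields
 *
 *	ts_mask = (0x0004 << 16) & SIBA_TML_SICF_MASK = 0x00040000
 *	ts_low  = (0x0004 << 16) & ts_mask            = 0x00040000
 *
 * so only the corresponding TMSTATELOW core flag bit is updated, and
 * all other target state bits are left untouched.
 */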

static bool
siba_is_hw_suspended(device_t dev, device_t child)
{
	uint32_t	ts_low;
	uint16_t	ioctl;
	int		error;

	/* Fetch target state */
	error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
	if (error) {
		device_printf(child, "error reading HW reset state: %d\n",
		    error);
		return (true);
	}

	/* Is core held in RESET? */
	if (ts_low & SIBA_TML_RESET)
		return (true);

	/* Is target reject enabled? */
	if (ts_low & SIBA_TML_REJ_MASK)
		return (true);

	/* Is core clocked? */
	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(ioctl & BHND_IOCTL_CLK_EN))
		return (true);

	return (false);
}

static int
siba_reset_hw(device_t dev, device_t child, uint16_t ioctl,
    uint16_t reset_ioctl)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ts_low, imstate;
	uint16_t		 clkflags;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Place core into known RESET state */
	if ((error = bhnd_suspend_hw(child, reset_ioctl)))
		return (error);

	/* Set RESET, clear REJ, set the caller's IOCTL flags, and
	 * force clocks to ensure the signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
	    (ioctl << SIBA_TML_SICF_SHIFT) |
	    (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
	    (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, UINT32_MAX);

	/* Clear any target errors */
	if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
		    0x0, SIBA_TMH_SERR);
	}

	/* Clear any initiator errors */
	imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE);
	if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_IBE|SIBA_IM_TO);
	}

	/* Release from RESET while leaving clocks forced, ensuring the
	 * signal propagates throughout the core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    SIBA_TML_RESET);

	/* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE
	 * bit and allow the core to manage clock gating. */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT));

	return (0);
}

static int
siba_suspend_hw(device_t dev, device_t child, uint16_t ioctl)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 idl, ts_low, ts_mask;
	uint16_t		 cflags, clkflags;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Already in RESET? */
	ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW);
	if (ts_low & SIBA_TML_RESET)
		return (0);

	/* If clocks are already disabled, we can place the core directly
	 * into RESET|REJ while setting the caller's IOCTL flags. */
	cflags = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(cflags & BHND_IOCTL_CLK_EN)) {
		ts_low = SIBA_TML_RESET | SIBA_TML_REJ |
		    (ioctl << SIBA_TML_SICF_SHIFT);
		ts_mask = SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK;

		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
		    ts_low, ts_mask);
		return (0);
	}

	/* Reject further transactions reaching this core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    SIBA_TML_REJ, SIBA_TML_REJ);

	/* Wait for transaction busy flag to clear for all transactions
	 * initiated by this core */
	error = siba_wait_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
	    0x0, SIBA_TMH_BUSY, 100000);
	if (error)
		return (error);

	/* If this is an initiator core, we need to reject initiator
	 * transactions too. */
	idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW);
	if (idl & SIBA_IDL_INIT) {
		/* Reject further initiator transactions */
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    SIBA_IM_RJ, SIBA_IM_RJ);

		/* Wait for initiator busy flag to clear */
		error = siba_wait_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    0x0, SIBA_IM_BY, 100000);
		if (error)
			return (error);
	}

	/* Put the core into RESET, set the caller's IOCTL flags, and
	 * force clocks to ensure the RESET signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
	    (ioctl << SIBA_TML_SICF_SHIFT) |
	    (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
	    (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);
	ts_mask = SIBA_TML_RESET |
	    SIBA_TML_SICF_MASK;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low,
	    ts_mask);

	/* Give RESET ample time */
	DELAY(10);

	/* Clear previously asserted initiator reject */
	if (idl & SIBA_IDL_INIT) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_RJ);
	}

	/* Disable all clocks, leaving RESET and REJ asserted */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT);

	/*
	 * Core is now in RESET.
	 *
	 * If the core holds any PWRCTL clock reservations, we need to release
	 * those now. This emulates the standard bhnd(4) PMU behavior of RESET
	 * automatically clearing clkctl.
	 */
	SIBA_LOCK(sc);
	if (dinfo->pmu_state == SIBA_PMU_PWRCTL) {
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    BHND_CLOCK_DYN);
		SIBA_UNLOCK(sc);

		if (error) {
			device_printf(child, "failed to release clock "
			    "request: %d\n", error);
			return (error);
		}

		return (0);
	} else {
		SIBA_UNLOCK(sc);
		return (0);
	}
}

static int
siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
    u_int width)
{
	struct siba_devinfo	*dinfo;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if (dinfo->cfg_res[0] == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(dinfo->cfg_res[0]->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		*((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 2:
		*((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 4:
		*((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0],
		    offset);
		return (0);
	default:
		return (EINVAL);
	}
}

static int
siba_write_config(device_t dev, device_t child, bus_size_t offset,
    const void *value, u_int width)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(r->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
		return (0);
	case 2:
		bhnd_bus_write_2(r, offset, *(const uint16_t *)value);
		return (0);
	case 4:
		bhnd_bus_write_4(r, offset, *(const uint32_t *)value);
		return (0);
	default:
		return (EINVAL);
	}
}
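
/*
 * Example (illustrative sketch): a directly attached child reads and
 * writes its CFG0 registers through the bhnd(4) wrappers, which resolve
 * to the two methods above; widths other than 1, 2, or 4 are rejected
 * with EINVAL, and out-of-range offsets with EFAULT:
 *
 *	uint32_t ts_low;
 *
 *	if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW,
 *	    &ts_low, 4)))
 *		return (error);
 *
 *	error = bhnd_write_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
 */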

static u_int
siba_get_port_count(device_t dev, device_t child, bhnd_port_type type)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
		    type));

	dinfo = device_get_ivars(child);
	return (siba_port_count(&dinfo->core_id, type));
}

static u_int
siba_get_region_count(device_t dev, device_t child, bhnd_port_type type,
    u_int port)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child,
		    type, port));

	dinfo = device_get_ivars(child);
	return (siba_port_region_count(&dinfo->core_id, type, port));
}

static int
siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child,
		    port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace entry */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL)
		return (addrspace->sa_rid);

	/* Try the config blocks */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL)
		return (cfg->cb_rid);

	/* Not found */
	return (-1);
}

static int
siba_decode_port_rid(device_t dev, device_t child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child,
		    type, rid, port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Ports are always memory mapped */
	if (type != SYS_RES_MEMORY)
		return (EINVAL);

	/* Look for a matching addrspace entry */
	for (u_int i = 0; i < dinfo->core_id.num_admatch; i++) {
		if (dinfo->addrspace[i].sa_rid != rid)
			continue;

		*port_type = BHND_PORT_DEVICE;
		*port_num = siba_addrspace_device_port(i);
		*region_num = siba_addrspace_device_region(i);
		return (0);
	}

	/* Try the config blocks */
	for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) {
		if (dinfo->cfg[i].cb_rid != rid)
			continue;

		*port_type = BHND_PORT_AGENT;
		*port_num = siba_cfg_agent_port(i);
		*region_num = siba_cfg_agent_region(i);
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

static int
siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev) {
		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child,
		    port_type, port_num, region_num, addr, size));
	}

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL) {
		*addr = addrspace->sa_base;
		*size = addrspace->sa_size - addrspace->sa_bus_reserved;
		return (0);
	}

	/* Look for a matching cfg block */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL) {
		*addr = cfg->cb_base;
		*size = cfg->cb_size;
		return (0);
	}

	/* Not found */
	return (ENOENT);
}
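
/*
 * Example (illustrative sketch): a child looks up the address and size
 * of the device-port register region it was enumerated with via the
 * bhnd(4) wrapper, which resolves to the method above:
 *
 *	bhnd_addr_t	addr;
 *	bhnd_size_t	size;
 *
 *	error = bhnd_get_region_addr(child, BHND_PORT_DEVICE, 0, 0,
 *	    &addr, &size);
 */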

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 */
u_int
siba_get_intr_count(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));

	dinfo = device_get_ivars(child);
	if (!dinfo->core_id.intr_en) {
		/* No interrupts */
		return (0);
	} else {
		/* One assigned interrupt */
		return (1);
	}
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
 */
int
siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
		    intr, ivec));

	/* Must be a valid interrupt ID */
	if (intr >= siba_get_intr_count(dev, child))
		return (ENXIO);

	KASSERT(intr == 0, ("invalid ivec %u", intr));

	dinfo = device_get_ivars(child);

	KASSERT(dinfo->core_id.intr_en,
	    ("core does not have an interrupt assigned"));

	*ivec = dinfo->core_id.intr_flag;
	return (0);
}

/**
 * Map per-core configuration blocks for @p dinfo.
 *
 * @param dev The siba bus device.
 * @param dinfo The device info instance on which to map all per-core
 * configuration blocks.
 */
static int
siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo)
{
	struct siba_addrspace	*addrspace;
	rman_res_t		 r_start, r_count, r_end;
	uint8_t			 num_cfg;
	int			 rid;

	num_cfg = dinfo->core_id.num_cfg_blocks;
	if (num_cfg > SIBA_MAX_CFG) {
		device_printf(dev, "config block count %hhu out of range\n",
		    num_cfg);
		return (ENXIO);
	}

	/* Fetch the core register address space */
	addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0);
	if (addrspace == NULL) {
		device_printf(dev, "missing device registers\n");
		return (ENXIO);
	}

	/*
	 * Map the per-core configuration blocks
	 */
	for (uint8_t i = 0; i < num_cfg; i++) {
		/* Add to child's resource list */
		r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i);
		r_count = SIBA_CFG_SIZE;
		r_end = r_start + r_count - 1;

		rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY,
		    r_start, r_end, r_count);

		/* Initialize config block descriptor */
		dinfo->cfg[i] = ((struct siba_cfg_block) {
			.cb_base = r_start,
			.cb_size = SIBA_CFG_SIZE,
			.cb_rid = rid
		});

		/* Map the config resource for bus-level access */
		dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i);
		dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev,
		    SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end,
		    r_count, RF_ACTIVE|RF_SHAREABLE);

		if (dinfo->cfg_res[i] == NULL) {
			device_printf(dev, "failed to allocate SIBA_CFG%hhu\n",
			    i);
			return (ENXIO);
		}
	}

	return (0);
}

static device_t
siba_add_child(device_t dev, u_int order, const char *name, int unit)
{
	struct siba_devinfo	*dinfo;
	device_t		 child;

	child = device_add_child_ordered(dev, order, name, unit);
	if (child == NULL)
		return (NULL);

	if ((dinfo = siba_alloc_dinfo(dev)) == NULL) {
		device_delete_child(dev, child);
		return (NULL);
	}

	device_set_ivars(child, dinfo);

	return (child);
}

static void
siba_child_deleted(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* Call required bhnd(4) implementation */
	bhnd_generic_child_deleted(dev, child);

	/* Free siba device info */
	if ((dinfo = device_get_ivars(child)) != NULL)
		siba_free_dinfo(dev, child, dinfo);

	device_set_ivars(child, NULL);
}

/**
 * Scan the core table and add all valid discovered cores to
 * the bus.
 *
 * @param dev The siba bus device.
 */
int
siba_add_children(device_t dev)
{
	bhnd_erom_t			*erom;
	struct siba_erom		*siba_erom;
	struct bhnd_erom_io		*eio;
	const struct bhnd_chipid	*cid;
	struct siba_core_id		*cores;
	device_t			*children;
	int				 error;

	cid = BHND_BUS_GET_CHIPID(dev, dev);

	/* Allocate our EROM parser */
	eio = bhnd_erom_iores_new(dev, SIBA_EROM_RID);
	erom = bhnd_erom_alloc(&siba_erom_parser, cid, eio);
	if (erom == NULL) {
		bhnd_erom_io_fini(eio);
		return (ENODEV);
	}

	/* Allocate our temporary core and device table */
	cores = malloc(sizeof(*cores) * cid->ncores, M_BHND, M_WAITOK);
	children = malloc(sizeof(*children) * cid->ncores, M_BHND,
	    M_WAITOK | M_ZERO);

	/*
	 * Add child devices for all discovered cores.
	 *
	 * On bridged devices, we'll exhaust our available register windows if
	 * we map config blocks on unpopulated/disabled cores. To avoid this, we
	 * defer mapping of the per-core siba(4) config blocks until all cores
	 * have been enumerated and otherwise configured.
	 */
	siba_erom = (struct siba_erom *)erom;
	for (u_int i = 0; i < cid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;

		if ((error = siba_erom_get_core_id(siba_erom, i, &cores[i])))
			goto failed;

		/* Add the child device */
		child = BUS_ADD_CHILD(dev, 0, NULL, -1);
		if (child == NULL) {
			error = ENXIO;
			goto failed;
		}

		children[i] = child;

		/* Initialize per-device bus info */
		if ((dinfo = device_get_ivars(child)) == NULL) {
			error = ENXIO;
			goto failed;
		}

		if ((error = siba_init_dinfo(dev, child, dinfo, &cores[i])))
			goto failed;

		/* If pins are floating or the hardware is otherwise
		 * unpopulated, the device shouldn't be used. */
		if (bhnd_is_hw_disabled(child))
			device_disable(child);
	}

	/* Free EROM (and any bridge register windows it might hold) */
	bhnd_erom_free(erom);
	erom = NULL;

	/* Map all valid cores' config register blocks and perform interrupt
	 * assignment */
	for (u_int i = 0; i < cid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;

		child = children[i];

		/* Skip if core is disabled */
		if (bhnd_is_hw_disabled(child))
			continue;

		dinfo = device_get_ivars(child);

		/* Map the core's config blocks */
		if ((error = siba_map_cfg_resources(dev, dinfo)))
			goto failed;

		/* Issue bus callback for fully initialized child. */
		BHND_BUS_CHILD_ADDED(dev, child);
	}

	free(cores, M_BHND);
	free(children, M_BHND);

	return (0);

failed:
	for (u_int i = 0; i < cid->ncores; i++) {
		if (children[i] == NULL)
			continue;

		device_delete_child(dev, children[i]);
	}

	free(cores, M_BHND);
	free(children, M_BHND);
	if (erom != NULL)
		bhnd_erom_free(erom);

	return (error);
}

static device_method_t siba_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			siba_probe),
	DEVMETHOD(device_attach,		siba_attach),
	DEVMETHOD(device_detach,		siba_detach),
	DEVMETHOD(device_resume,		siba_resume),
	DEVMETHOD(device_suspend,		siba_suspend),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		siba_add_child),
	DEVMETHOD(bus_child_deleted,		siba_child_deleted),
	DEVMETHOD(bus_read_ivar,		siba_read_ivar),
	DEVMETHOD(bus_write_ivar,		siba_write_ivar),
	DEVMETHOD(bus_get_resource_list,	siba_get_resource_list),

	/* BHND interface */
	DEVMETHOD(bhnd_bus_get_erom_class,	siba_get_erom_class),
	DEVMETHOD(bhnd_bus_alloc_pmu,		siba_alloc_pmu),
	DEVMETHOD(bhnd_bus_release_pmu,		siba_release_pmu),
	DEVMETHOD(bhnd_bus_request_clock,	siba_request_clock),
	DEVMETHOD(bhnd_bus_enable_clocks,	siba_enable_clocks),
	DEVMETHOD(bhnd_bus_request_ext_rsrc,	siba_request_ext_rsrc),
	DEVMETHOD(bhnd_bus_release_ext_rsrc,	siba_release_ext_rsrc),
	DEVMETHOD(bhnd_bus_get_clock_freq,	siba_get_clock_freq),
	DEVMETHOD(bhnd_bus_get_clock_latency,	siba_get_clock_latency),
	DEVMETHOD(bhnd_bus_read_ioctl,		siba_read_ioctl),
	DEVMETHOD(bhnd_bus_write_ioctl,		siba_write_ioctl),
	DEVMETHOD(bhnd_bus_read_iost,		siba_read_iost),
	DEVMETHOD(bhnd_bus_is_hw_suspended,	siba_is_hw_suspended),
	DEVMETHOD(bhnd_bus_reset_hw,		siba_reset_hw),
	DEVMETHOD(bhnd_bus_suspend_hw,		siba_suspend_hw),
	DEVMETHOD(bhnd_bus_read_config,		siba_read_config),
	DEVMETHOD(bhnd_bus_write_config,	siba_write_config),
	DEVMETHOD(bhnd_bus_get_port_count,	siba_get_port_count),
	DEVMETHOD(bhnd_bus_get_region_count,	siba_get_region_count),
	DEVMETHOD(bhnd_bus_get_port_rid,	siba_get_port_rid),
	DEVMETHOD(bhnd_bus_decode_port_rid,	siba_decode_port_rid),
	DEVMETHOD(bhnd_bus_get_region_addr,	siba_get_region_addr),
	DEVMETHOD(bhnd_bus_get_intr_count,	siba_get_intr_count),
	DEVMETHOD(bhnd_bus_get_intr_ivec,	siba_get_intr_ivec),

	DEVMETHOD_END
};

DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc),
    bhnd_driver);

MODULE_VERSION(siba, 1);
MODULE_DEPEND(siba, bhnd, 1, 1, 1);