1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright © 2021-2022 Dmitry Salychev
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 /*
32 * The DPAA2 Management Complex (MC) bus driver.
33 *
34 * MC is a hardware resource manager which can be found in several NXP
35 * SoCs (LX2160A, for example) and provides an access to the specialized
36 * hardware objects used in network-oriented packet processing applications.
37 */
38
39 #include "opt_acpi.h"
40 #include "opt_platform.h"
41
42 #include <sys/param.h>
43 #include <sys/kernel.h>
44 #include <sys/bus.h>
45 #include <sys/rman.h>
46 #include <sys/module.h>
47 #include <sys/malloc.h>
48 #include <sys/mutex.h>
49 #include <sys/queue.h>
50
51 #include <vm/vm.h>
52
53 #include <machine/bus.h>
54 #include <machine/resource.h>
55
56 #ifdef DEV_ACPI
57 #include <contrib/dev/acpica/include/acpi.h>
58 #include <dev/acpica/acpivar.h>
59 #endif
60
61 #ifdef FDT
62 #include <dev/ofw/openfirm.h>
63 #include <dev/ofw/ofw_bus.h>
64 #include <dev/ofw/ofw_bus_subr.h>
65 #include <dev/ofw/ofw_pci.h>
66 #endif
67
68 #include "pcib_if.h"
69 #include "pci_if.h"
70
71 #include "dpaa2_mc.h"
72
73 /* Macros to read/write MC registers */
74 #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r))
75 #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v))
76
77 #define COMPARE_TYPE(t, v) (strncmp((v), (t), strlen((v))) == 0)
78
79 #define IORT_DEVICE_NAME "MCE"
80
81 /* MC Registers */
82 #define MC_REG_GCR1 0x0000u
83 #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? */
84 #define MC_REG_GSR 0x0008u
85 #define MC_REG_FAPR 0x0028u
86
87 /* General Control Register 1 (GCR1) */
88 #define GCR1_P1_STOP 0x80000000u
89 #define GCR1_P2_STOP 0x40000000u
90
91 /* General Status Register (GSR) */
92 #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31)
93 #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30)
94 #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8)
95 #define GSR_MCS(v) (((v) & 0xFFu) >> 0)
96
97 /* Timeouts to wait for the MC status. */
98 #define MC_STAT_TIMEOUT 1000u /* us */
99 #define MC_STAT_ATTEMPTS 100u
100
/**
 * @brief Structure to describe a DPAA2 device as a managed resource.
 *
 * One entry is queued on the softc's mdev_list for every DPAA2 device
 * handed to the MC via DPAA2_MC_MANAGE_DEV().
 */
struct dpaa2_mc_devinfo {
	STAILQ_ENTRY(dpaa2_mc_devinfo) link;	/* entry in softc mdev_list */
	device_t dpaa2_dev;	/* the managed DPAA2 device itself */
	uint32_t flags;		/* DPAA2_MC_DEV_* (allocatable/shareable) */
	uint32_t owners;	/* current holders of a shareable device */
};
110
111 MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex");
112
/*
 * Memory regions expected from the parent bus:
 *   res[0] - MC command portal (mandatory; mapped later in attach);
 *   res[1] - MC control registers (optional; only present on some
 *            platforms/firmware configurations).
 * Both are allocated unmapped so attach can map them VM_MEMATTR_DEVICE.
 */
static struct resource_spec dpaa2_mc_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED },
	{ SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL },
	RESOURCE_SPEC_END
};
118
119 static u_int dpaa2_mc_get_xref(device_t, device_t);
120 static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *);
121 static struct rman *dpaa2_mc_rman(device_t, int);
122
123 static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *);
124 static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *);
125 static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *,
126 uint32_t *);
127
128 /*
129 * For device interface.
130 */
131
/**
 * @brief Attach the DPAA2 Management Complex (MC) bus driver.
 *
 * Allocates the MC memory regions, optionally releases the MC processor
 * from reset (when the control register region is present), initializes
 * the resource managers used to hand out allocatable DPAA2 objects
 * (DPIO, DPBP, DPCON, DPMCP) and adds the root resource container
 * ("dpaa2_rc") as the only child of this bus.
 *
 * @param dev	The MC bus device.
 * @return 0 on success, ENXIO on any failure (the function calls
 *	dpaa2_mc_detach() on itself before returning an error).
 */
int
dpaa2_mc_attach(device_t dev)
{
	struct dpaa2_mc_softc *sc;
	struct resource_map_request req;
	uint32_t val;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->msi_allocated = false;
	sc->msi_owner = NULL;

	error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources\n",
		    __func__);
		return (ENXIO);
	}

	/* res[1] (control registers) is optional; see dpaa2_mc_spec. */
	if (sc->res[1]) {
		resource_init_map_request(&req);
		req.memattr = VM_MEMATTR_DEVICE;
		error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1],
		    &req, &sc->map[1]);
		if (error) {
			device_printf(dev, "%s: failed to map control "
			    "registers\n", __func__);
			dpaa2_mc_detach(dev);
			return (ENXIO);
		}

		if (bootverbose)
			device_printf(dev,
			    "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n",
			    mcreg_read_4(sc, MC_REG_GCR1),
			    mcreg_read_4(sc, MC_REG_GCR2),
			    mcreg_read_4(sc, MC_REG_GSR),
			    mcreg_read_4(sc, MC_REG_FAPR));

		/* Reset P1_STOP and P2_STOP bits to resume MC processor. */
		val = mcreg_read_4(sc, MC_REG_GCR1) &
		    ~(GCR1_P1_STOP | GCR1_P2_STOP);
		mcreg_write_4(sc, MC_REG_GCR1, val);

		/*
		 * Poll MC status: wait up to MC_STAT_ATTEMPTS reads,
		 * MC_STAT_TIMEOUT us apart, for a non-zero status code in
		 * GSR.  NOTE(review): a poll timeout is not treated as an
		 * error here - presumably intentional; confirm.
		 */
		if (bootverbose)
			device_printf(dev, "polling MC status...\n");
		for (int i = 0; i < MC_STAT_ATTEMPTS; i++) {
			val = mcreg_read_4(sc, MC_REG_GSR);
			if (GSR_MCS(val) != 0u)
				break;
			DELAY(MC_STAT_TIMEOUT);
		}

		if (bootverbose)
			device_printf(dev,
			    "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n",
			    mcreg_read_4(sc, MC_REG_GCR1),
			    mcreg_read_4(sc, MC_REG_GCR2),
			    mcreg_read_4(sc, MC_REG_GSR),
			    mcreg_read_4(sc, MC_REG_FAPR));
	}

	/* At least 64 bytes of the command portal should be available. */
	if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) {
		device_printf(dev, "%s: MC portal memory region too small: "
		    "%jd\n", __func__, rman_get_size(sc->res[0]));
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Map MC portal memory resource. */
	resource_init_map_request(&req);
	req.memattr = VM_MEMATTR_DEVICE;
	error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0],
	    &req, &sc->map[0]);
	if (error) {
		device_printf(dev, "Failed to map MC portal memory\n");
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 I/O objects. */
	sc->dpio_rman.rm_type = RMAN_ARRAY;
	sc->dpio_rman.rm_descr = "DPAA2 DPIO objects";
	error = rman_init(&sc->dpio_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 I/O objects: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 buffer pools. */
	sc->dpbp_rman.rm_type = RMAN_ARRAY;
	sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects";
	error = rman_init(&sc->dpbp_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 buffer pools: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 concentrators. */
	sc->dpcon_rman.rm_type = RMAN_ARRAY;
	sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects";
	error = rman_init(&sc->dpcon_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 concentrators: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 MC portals. */
	sc->dpmcp_rman.rm_type = RMAN_ARRAY;
	sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects";
	error = rman_init(&sc->dpmcp_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 MC portals: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a list of non-allocatable DPAA2 devices. */
	mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF);
	STAILQ_INIT(&sc->mdev_list);

	mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF);

	/*
	 * Add a root resource container as the only child of the bus. All of
	 * the direct descendant containers will be attached to the root one
	 * instead of the MC device.
	 */
	sc->rcdev = device_add_child(dev, "dpaa2_rc", 0);
	if (sc->rcdev == NULL) {
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}
	/* NOTE(review): return values ignored here; both are best-effort. */
	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}
280
281 int
282 dpaa2_mc_detach(device_t dev)
283 {
284 struct dpaa2_mc_softc *sc;
285 struct dpaa2_devinfo *dinfo = NULL;
286 int error;
287
288 bus_generic_detach(dev);
289
290 sc = device_get_softc(dev);
291 if (sc->rcdev)
292 device_delete_child(dev, sc->rcdev);
293 bus_release_resources(dev, dpaa2_mc_spec, sc->res);
294
295 dinfo = device_get_ivars(dev);
296 if (dinfo)
297 free(dinfo, M_DPAA2_MC);
298
299 error = bus_generic_detach(dev);
300 if (error != 0)
301 return (error);
302
303 return (device_delete_children(dev));
304 }
305
306 /*
307 * For bus interface.
308 */
309
/**
 * @brief Allocate a resource on behalf of a child device.
 *
 * DPAA2-specific resource types (those with a dedicated rman, see
 * dpaa2_mc_rman()) are reserved from the MC's own resource managers;
 * everything else is passed up to the parent bus.
 *
 * @return The reserved resource, or NULL on failure.
 */
struct resource *
dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;
	struct rman *rm;
	int error;

	rm = dpaa2_mc_rman(mcdev, type);
	if (!rm)
		/* Not a DPAA2-managed type: delegate to the parent bus. */
		return (BUS_ALLOC_RESOURCE(device_get_parent(mcdev), child,
		    type, rid, start, end, count, flags));

	/*
	 * Skip managing DPAA2-specific resource. It must be provided to MC by
	 * calling DPAA2_MC_MANAGE_DEV() beforehand.
	 *
	 * NOTE(review): dpaa2_mc_rman() only returns non-NULL for DPIO/DPBP/
	 * DPCON/DPMCP types, which appear to be numerically greater than
	 * DPAA2_DEV_MC, so this branch looks unreachable - confirm against
	 * the enum ordering in dpaa2_mc.h.
	 */
	if (type <= DPAA2_DEV_MC) {
		error = rman_manage_region(rm, start, end);
		if (error) {
			device_printf(mcdev, "rman_manage_region() failed: "
			    "start=%#jx, end=%#jx, error=%d\n", start, end,
			    error);
			goto fail;
		}
	}

	res = rman_reserve_resource(rm, start, end, count, flags, child);
	if (!res) {
		device_printf(mcdev, "rman_reserve_resource() failed: "
		    "start=%#jx, end=%#jx, count=%#jx\n", start, end, count);
		goto fail;
	}

	rman_set_rid(res, *rid);

	if (flags & RF_ACTIVE) {
		if (bus_activate_resource(child, type, *rid, res)) {
			device_printf(mcdev, "bus_activate_resource() failed: "
			    "rid=%d, res=%#jx\n", *rid, (uintmax_t) res);
			rman_release_resource(res);
			goto fail;
		}
	}

	return (res);
 fail:
	device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, "
	    "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end,
	    count, flags);
	return (NULL);
}
362
363 int
364 dpaa2_mc_adjust_resource(device_t mcdev, device_t child, int type,
365 struct resource *r, rman_res_t start, rman_res_t end)
366 {
367 struct rman *rm;
368
369 rm = dpaa2_mc_rman(mcdev, type);
370 if (rm)
371 return (rman_adjust_resource(r, start, end));
372 return (bus_generic_adjust_resource(mcdev, child, type, r, start, end));
373 }
374
375 int
376 dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid,
377 struct resource *r)
378 {
379 struct rman *rm;
380
381 rm = dpaa2_mc_rman(mcdev, type);
382 if (rm) {
383 KASSERT(rman_is_region_manager(r, rm), ("rman mismatch"));
384 rman_release_resource(r);
385 }
386
387 return (bus_generic_release_resource(mcdev, child, type, rid, r));
388 }
389
390 int
391 dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, int rid,
392 struct resource *r)
393 {
394 int rc;
395
396 if ((rc = rman_activate_resource(r)) != 0)
397 return (rc);
398
399 return (BUS_ACTIVATE_RESOURCE(device_get_parent(mcdev), child, type,
400 rid, r));
401 }
402
403 int
404 dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, int rid,
405 struct resource *r)
406 {
407 int rc;
408
409 if ((rc = rman_deactivate_resource(r)) != 0)
410 return (rc);
411
412 return (BUS_DEACTIVATE_RESOURCE(device_get_parent(mcdev), child, type,
413 rid, r));
414 }
415
416 /*
417 * For pseudo-pcib interface.
418 */
419
/**
 * @brief Allocate `count` MSIs for a child device (pseudo-pcib method).
 *
 * Thin wrapper over the INTRNG implementation; ENXIO on kernels built
 * without INTRNG.
 *
 * NOTE(review): this wrapper is compiled whenever INTRNG is defined, but
 * dpaa2_mc_alloc_msi_impl() is only defined under
 * "INTRNG && !IOMMU" (see below) - verify that a kernel with both
 * options links.
 */
int
dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount,
    int *irqs)
{
#if defined(INTRNG)
	return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs));
#else
	return (ENXIO);
#endif
}
430
/**
 * @brief Release MSIs previously handed out to a child (pseudo-pcib
 * method).  Thin wrapper over the INTRNG implementation; ENXIO on
 * kernels built without INTRNG.
 */
int
dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs)
{
#if defined(INTRNG)
	return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs));
#else
	return (ENXIO);
#endif
}
440
/**
 * @brief Map an MSI to its doorbell address/data pair (pseudo-pcib
 * method).  Thin wrapper over the INTRNG implementation; ENXIO on
 * kernels built without INTRNG.
 */
int
dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
#if defined(INTRNG)
	return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data));
#else
	return (ENXIO);
#endif
}
451
452 int
453 dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type,
454 uintptr_t *id)
455 {
456 struct dpaa2_devinfo *dinfo;
457
458 dinfo = device_get_ivars(child);
459
460 if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
461 return (ENXIO);
462
463 if (type == PCI_ID_MSI)
464 return (dpaa2_mc_map_id(mcdev, child, id));
465
466 *id = dinfo->icid;
467 return (0);
468 }
469
470 /*
471 * For DPAA2 Management Complex bus driver interface.
472 */
473
474 int
475 dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags)
476 {
477 struct dpaa2_mc_softc *sc;
478 struct dpaa2_devinfo *dinfo;
479 struct dpaa2_mc_devinfo *di;
480 struct rman *rm;
481 int error;
482
483 sc = device_get_softc(mcdev);
484 dinfo = device_get_ivars(dpaa2_dev);
485
486 if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
487 return (EINVAL);
488
489 di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO);
490 if (!di)
491 return (ENOMEM);
492 di->dpaa2_dev = dpaa2_dev;
493 di->flags = flags;
494 di->owners = 0;
495
496 /* Append a new managed DPAA2 device to the queue. */
497 mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
498 mtx_lock(&sc->mdev_lock);
499 STAILQ_INSERT_TAIL(&sc->mdev_list, di, link);
500 mtx_unlock(&sc->mdev_lock);
501
502 if (flags & DPAA2_MC_DEV_ALLOCATABLE) {
503 /* Select rman based on a type of the DPAA2 device. */
504 rm = dpaa2_mc_rman(mcdev, dinfo->dtype);
505 if (!rm)
506 return (ENOENT);
507 /* Manage DPAA2 device as an allocatable resource. */
508 error = rman_manage_region(rm, (rman_res_t) dpaa2_dev,
509 (rman_res_t) dpaa2_dev);
510 if (error)
511 return (error);
512 }
513
514 return (0);
515 }
516
/**
 * @brief Find the first unallocated DPAA2 device of the given type.
 *
 * Devices are tracked in an rman where each region's start == end is the
 * device_t pointer value itself (see dpaa2_mc_manage_dev()), so the
 * first free region IS the free device.
 *
 * @return 0 and *dpaa2_dev set on success; EINVAL/ENOENT/rman error
 *	otherwise.
 */
int
dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct rman *rm;
	rman_res_t start, end;
	int error;

	if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	/* Select resource manager based on a type of the DPAA2 device. */
	rm = dpaa2_mc_rman(mcdev, devtype);
	if (!rm)
		return (ENOENT);
	/* Find first free DPAA2 device of the given type. */
	error = rman_first_free_region(rm, &start, &end);
	if (error)
		return (error);

	KASSERT(start == end, ("start != end, but should be the same pointer "
	    "to the DPAA2 device: start=%jx, end=%jx", start, end));

	/* Decode the device_t back out of the region boundary. */
	*dpaa2_dev = (device_t) start;

	return (0);
}
544
545 int
546 dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev,
547 enum dpaa2_dev_type devtype, uint32_t obj_id)
548 {
549 struct dpaa2_mc_softc *sc;
550 struct dpaa2_devinfo *dinfo;
551 struct dpaa2_mc_devinfo *di;
552 int error = ENOENT;
553
554 sc = device_get_softc(mcdev);
555
556 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
557 return (EINVAL);
558
559 mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
560 mtx_lock(&sc->mdev_lock);
561
562 STAILQ_FOREACH(di, &sc->mdev_list, link) {
563 dinfo = device_get_ivars(di->dpaa2_dev);
564 if (dinfo->dtype == devtype && dinfo->id == obj_id) {
565 *dpaa2_dev = di->dpaa2_dev;
566 error = 0;
567 break;
568 }
569 }
570
571 mtx_unlock(&sc->mdev_lock);
572
573 return (error);
574 }
575
576 int
577 dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev,
578 enum dpaa2_dev_type devtype)
579 {
580 struct dpaa2_mc_softc *sc;
581 struct dpaa2_devinfo *dinfo;
582 struct dpaa2_mc_devinfo *di;
583 device_t dev = NULL;
584 uint32_t owners = UINT32_MAX;
585 int error = ENOENT;
586
587 sc = device_get_softc(mcdev);
588
589 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
590 return (EINVAL);
591
592 mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
593 mtx_lock(&sc->mdev_lock);
594
595 STAILQ_FOREACH(di, &sc->mdev_list, link) {
596 dinfo = device_get_ivars(di->dpaa2_dev);
597
598 if ((dinfo->dtype == devtype) &&
599 (di->flags & DPAA2_MC_DEV_SHAREABLE) &&
600 (di->owners < owners)) {
601 dev = di->dpaa2_dev;
602 owners = di->owners;
603 }
604 }
605 if (dev) {
606 *dpaa2_dev = dev;
607 error = 0;
608 }
609
610 mtx_unlock(&sc->mdev_lock);
611
612 return (error);
613 }
614
615 int
616 dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev,
617 enum dpaa2_dev_type devtype)
618 {
619 struct dpaa2_mc_softc *sc;
620 struct dpaa2_mc_devinfo *di;
621 int error = ENOENT;
622
623 sc = device_get_softc(mcdev);
624
625 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
626 return (EINVAL);
627
628 mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
629 mtx_lock(&sc->mdev_lock);
630
631 STAILQ_FOREACH(di, &sc->mdev_list, link) {
632 if (di->dpaa2_dev == dpaa2_dev &&
633 (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
634 di->owners++;
635 error = 0;
636 break;
637 }
638 }
639
640 mtx_unlock(&sc->mdev_lock);
641
642 return (error);
643 }
644
645 int
646 dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev,
647 enum dpaa2_dev_type devtype)
648 {
649 struct dpaa2_mc_softc *sc;
650 struct dpaa2_mc_devinfo *di;
651 int error = ENOENT;
652
653 sc = device_get_softc(mcdev);
654
655 if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
656 return (EINVAL);
657
658 mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
659 mtx_lock(&sc->mdev_lock);
660
661 STAILQ_FOREACH(di, &sc->mdev_list, link) {
662 if (di->dpaa2_dev == dpaa2_dev &&
663 (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
664 di->owners -= di->owners > 0 ? 1 : 0;
665 error = 0;
666 break;
667 }
668 }
669
670 mtx_unlock(&sc->mdev_lock);
671
672 return (error);
673 }
674
675 /**
676 * @brief Convert DPAA2 device type to string.
677 */
678 const char *
679 dpaa2_ttos(enum dpaa2_dev_type type)
680 {
681 switch (type) {
682 case DPAA2_DEV_MC:
683 return ("mc"); /* NOTE: to print as information only. */
684 case DPAA2_DEV_RC:
685 return ("dprc");
686 case DPAA2_DEV_IO:
687 return ("dpio");
688 case DPAA2_DEV_NI:
689 return ("dpni");
690 case DPAA2_DEV_MCP:
691 return ("dpmcp");
692 case DPAA2_DEV_BP:
693 return ("dpbp");
694 case DPAA2_DEV_CON:
695 return ("dpcon");
696 case DPAA2_DEV_MAC:
697 return ("dpmac");
698 case DPAA2_DEV_MUX:
699 return ("dpdmux");
700 case DPAA2_DEV_SW:
701 return ("dpsw");
702 default:
703 break;
704 }
705 return ("notype");
706 }
707
708 /**
709 * @brief Convert string to DPAA2 device type.
710 */
711 enum dpaa2_dev_type
712 dpaa2_stot(const char *str)
713 {
714 if (COMPARE_TYPE(str, "dprc")) {
715 return (DPAA2_DEV_RC);
716 } else if (COMPARE_TYPE(str, "dpio")) {
717 return (DPAA2_DEV_IO);
718 } else if (COMPARE_TYPE(str, "dpni")) {
719 return (DPAA2_DEV_NI);
720 } else if (COMPARE_TYPE(str, "dpmcp")) {
721 return (DPAA2_DEV_MCP);
722 } else if (COMPARE_TYPE(str, "dpbp")) {
723 return (DPAA2_DEV_BP);
724 } else if (COMPARE_TYPE(str, "dpcon")) {
725 return (DPAA2_DEV_CON);
726 } else if (COMPARE_TYPE(str, "dpmac")) {
727 return (DPAA2_DEV_MAC);
728 } else if (COMPARE_TYPE(str, "dpdmux")) {
729 return (DPAA2_DEV_MUX);
730 } else if (COMPARE_TYPE(str, "dpsw")) {
731 return (DPAA2_DEV_SW);
732 }
733
734 return (DPAA2_DEV_NOTYPE);
735 }
736
/**
 * @internal
 * @brief Resolve the MSI controller cross-reference for a child.
 *
 * ACPI path: maps the child's ICID through the IORT table; FDT path:
 * resolves it through the "msi-map" property of the MC node.  Returns 0
 * when no mapping can be found.
 */
static u_int
dpaa2_mc_get_xref(device_t mcdev, device_t child)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);
#ifdef DEV_ACPI
	u_int xref, devid;
#endif
#ifdef FDT
	phandle_t msi_parent;
#endif
	/*
	 * NOTE(review): with neither DEV_ACPI nor FDT defined, `error` is
	 * unused and may trigger a compiler warning - confirm all supported
	 * configs define at least one.
	 */
	int error;

	if (sc && dinfo) {
#ifdef DEV_ACPI
		if (sc->acpi_based) {
			/*
			 * NOTE: The first named component from the IORT table
			 * with the given name (as a substring) will be used.
			 */
			error = acpi_iort_map_named_msi(IORT_DEVICE_NAME,
			    dinfo->icid, &xref, &devid);
			if (error)
				return (0);
			return (xref);
		}
#endif
#ifdef FDT
		if (!sc->acpi_based) {
			/* FDT-based driver. */
			error = ofw_bus_msimap(sc->ofw_node, dinfo->icid,
			    &msi_parent, NULL);
			if (error)
				return (0);
			return ((u_int) msi_parent);
		}
#endif
	}
	return (0);
}
780
/**
 * @internal
 * @brief Map a child's ICID to its MSI device ID via the IORT table.
 *
 * Falls back to the raw ICID when the IORT lookup fails or ACPI support
 * is not compiled in.
 *
 * NOTE(review): the return type is u_int but the function returns errno
 * values (ENXIO) and is consumed as an int by dpaa2_mc_get_id() -
 * consider changing the return type to int.
 */
static u_int
dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id)
{
	struct dpaa2_devinfo *dinfo;
#ifdef DEV_ACPI
	u_int xref, devid;
	int error;
#endif

	dinfo = device_get_ivars(child);
	if (dinfo) {
		/*
		 * The first named components from IORT table with the given
		 * name (as a substring) will be used.
		 */
#ifdef DEV_ACPI
		error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid,
		    &xref, &devid);
		if (error == 0)
			*id = devid;
		else
#endif
			*id = dinfo->icid; /* RID not in IORT, likely FW bug */

		return (0);
	}
	return (ENXIO);
}
812
813 /**
814 * @internal
815 * @brief Obtain a resource manager based on the given type of the resource.
816 */
817 static struct rman *
818 dpaa2_mc_rman(device_t mcdev, int type)
819 {
820 struct dpaa2_mc_softc *sc;
821
822 sc = device_get_softc(mcdev);
823
824 switch (type) {
825 case DPAA2_DEV_IO:
826 return (&sc->dpio_rman);
827 case DPAA2_DEV_BP:
828 return (&sc->dpbp_rman);
829 case DPAA2_DEV_CON:
830 return (&sc->dpcon_rman);
831 case DPAA2_DEV_MCP:
832 return (&sc->dpmcp_rman);
833 default:
834 break;
835 }
836
837 return (NULL);
838 }
839
840 #if defined(INTRNG) && !defined(IOMMU)
841
842 /**
843 * @internal
844 * @brief Allocates requested number of MSIs.
845 *
846 * NOTE: This function is a part of fallback solution when IOMMU isn't available.
847 * Total number of IRQs is limited to 32.
848 */
849 static int
850 dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount,
851 int *irqs)
852 {
853 struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
854 int msi_irqs[DPAA2_MC_MSI_COUNT];
855 int error;
856
857 /* Pre-allocate a bunch of MSIs for MC to be used by its children. */
858 if (!sc->msi_allocated) {
859 error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev,
860 child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs);
861 if (error) {
862 device_printf(mcdev, "failed to pre-allocate %d MSIs: "
863 "error=%d\n", DPAA2_MC_MSI_COUNT, error);
864 return (error);
865 }
866
867 mtx_assert(&sc->msi_lock, MA_NOTOWNED);
868 mtx_lock(&sc->msi_lock);
869 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
870 sc->msi[i].child = NULL;
871 sc->msi[i].irq = msi_irqs[i];
872 }
873 sc->msi_owner = child;
874 sc->msi_allocated = true;
875 mtx_unlock(&sc->msi_lock);
876 }
877
878 error = ENOENT;
879
880 /* Find the first free MSIs from the pre-allocated pool. */
881 mtx_assert(&sc->msi_lock, MA_NOTOWNED);
882 mtx_lock(&sc->msi_lock);
883 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
884 if (sc->msi[i].child != NULL)
885 continue;
886 error = 0;
887 for (int j = 0; j < count; j++) {
888 if (i + j >= DPAA2_MC_MSI_COUNT) {
889 device_printf(mcdev, "requested %d MSIs exceed "
890 "limit of %d available\n", count,
891 DPAA2_MC_MSI_COUNT);
892 error = E2BIG;
893 break;
894 }
895 sc->msi[i + j].child = child;
896 irqs[j] = sc->msi[i + j].irq;
897 }
898 break;
899 }
900 mtx_unlock(&sc->msi_lock);
901
902 return (error);
903 }
904
905 /**
906 * @internal
907 * @brief Marks IRQs as free in the pre-allocated pool of MSIs.
908 *
909 * NOTE: This function is a part of fallback solution when IOMMU isn't available.
910 * Total number of IRQs is limited to 32.
911 * NOTE: MSIs are kept allocated in the kernel as a part of the pool.
912 */
913 static int
914 dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs)
915 {
916 struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
917
918 mtx_assert(&sc->msi_lock, MA_NOTOWNED);
919 mtx_lock(&sc->msi_lock);
920 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
921 if (sc->msi[i].child != child)
922 continue;
923 for (int j = 0; j < count; j++) {
924 if (sc->msi[i].irq == irqs[j]) {
925 sc->msi[i].child = NULL;
926 break;
927 }
928 }
929 }
930 mtx_unlock(&sc->msi_lock);
931
932 return (0);
933 }
934
935 /**
936 * @internal
937 * @brief Provides address to write to and data according to the given MSI from
938 * the pre-allocated pool.
939 *
940 * NOTE: This function is a part of fallback solution when IOMMU isn't available.
941 * Total number of IRQs is limited to 32.
942 */
943 static int
944 dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr,
945 uint32_t *data)
946 {
947 struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
948 int error = EINVAL;
949
950 mtx_assert(&sc->msi_lock, MA_NOTOWNED);
951 mtx_lock(&sc->msi_lock);
952 for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
953 if (sc->msi[i].child == child && sc->msi[i].irq == irq) {
954 error = 0;
955 break;
956 }
957 }
958 mtx_unlock(&sc->msi_lock);
959 if (error)
960 return (error);
961
962 return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev,
963 sc->msi_owner), irq, addr, data));
964 }
965
966 #endif /* defined(INTRNG) && !defined(IOMMU) */
967
/*
 * Base driver class with no methods of its own: this table only
 * terminates the method list.  NOTE(review): presumably the ACPI/FDT
 * front-end drivers subclass dpaa2_mc_driver and supply the probe/
 * attach and bus methods - confirm against the attachment sources.
 */
static device_method_t dpaa2_mc_methods[] = {
	DEVMETHOD_END
};

DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods,
    sizeof(struct dpaa2_mc_softc));
Cache object: 808477fe0a928f2b5311f2a404036828
|