1 /*-
2 * Copyright (c) 2015-2016 The FreeBSD Foundation
3 *
4 * This software was developed by Andrew Turner under
5 * the sponsorship of the FreeBSD Foundation.
6 *
7 * This software was developed by Semihalf under
8 * the sponsorship of the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include "opt_acpi.h"
33 #include "opt_platform.h"
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/rman.h>
47 #include <sys/pcpu.h>
48 #include <sys/proc.h>
49 #include <sys/cpuset.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/smp.h>
53 #include <sys/interrupt.h>
54
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/intr.h>
61
62 #ifdef FDT
63 #include <dev/fdt/fdt_intr.h>
64 #include <dev/ofw/ofw_bus_subr.h>
65 #endif
66
67 #ifdef DEV_ACPI
68 #include <contrib/dev/acpica/include/acpi.h>
69 #include <dev/acpica/acpivar.h>
70 #endif
71
72 #include "gic_if.h"
73 #include "pic_if.h"
74 #include "msi_if.h"
75
76 #include <arm/arm/gic_common.h>
77 #include "gic_v3_reg.h"
78 #include "gic_v3_var.h"
79
/* Bus interface method implementations (bodies below). */
static bus_print_child_t gic_v3_print_child;
static bus_get_domain_t gic_v3_get_domain;
static bus_read_ivar_t gic_v3_read_ivar;
static bus_write_ivar_t gic_v3_write_ivar;
static bus_alloc_resource_t gic_v3_alloc_resource;

/* Interrupt controller (PIC) interface implementations. */
static pic_disable_intr_t gic_v3_disable_intr;
static pic_enable_intr_t gic_v3_enable_intr;
static pic_map_intr_t gic_v3_map_intr;
static pic_setup_intr_t gic_v3_setup_intr;
static pic_teardown_intr_t gic_v3_teardown_intr;
static pic_post_filter_t gic_v3_post_filter;
static pic_post_ithread_t gic_v3_post_ithread;
static pic_pre_ithread_t gic_v3_pre_ithread;
static pic_bind_intr_t gic_v3_bind_intr;
#ifdef SMP
static pic_init_secondary_t gic_v3_init_secondary;
static pic_ipi_send_t gic_v3_ipi_send;
static pic_ipi_setup_t gic_v3_ipi_setup;
#endif

/* GIC-specific interface (used by child ITS/MSI drivers). */
static gic_reserve_msi_range_t gic_v3_reserve_msi_range;
static gic_alloc_msi_t gic_v3_gic_alloc_msi;
static gic_release_msi_t gic_v3_gic_release_msi;
static gic_alloc_msix_t gic_v3_gic_alloc_msix;
static gic_release_msix_t gic_v3_gic_release_msix;

/* MSI/MSI-X interface. */
static msi_alloc_msi_t gic_v3_alloc_msi;
static msi_release_msi_t gic_v3_release_msi;
static msi_alloc_msix_t gic_v3_alloc_msix;
static msi_release_msix_t gic_v3_release_msix;
static msi_map_msi_t gic_v3_map_msi;

/* Last CPU an SPI was round-robin bound to; see gic_v3_bind_intr(). */
static u_int gic_irq_cpu;
#ifdef SMP
/* Map of SGI number -> IPI number handed out by gic_v3_ipi_setup(). */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
/* Next SGI available for IPI allocation. */
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif
118
/*
 * Method table for the base GICv3 driver class; FDT/ACPI front-ends
 * subclass this via DEFINE_CLASS_0/EARLY_DRIVER_MODULE elsewhere.
 */
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	gic_v3_print_child),
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),
	DEVMETHOD(bus_write_ivar,	gic_v3_write_ivar),
	DEVMETHOD(bus_alloc_resource,	gic_v3_alloc_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	gic_v3_alloc_msi),
	DEVMETHOD(msi_release_msi,	gic_v3_release_msi),
	DEVMETHOD(msi_alloc_msix,	gic_v3_alloc_msix),
	DEVMETHOD(msi_release_msix,	gic_v3_release_msix),
	DEVMETHOD(msi_map_msi,		gic_v3_map_msi),

	/* GIC */
	DEVMETHOD(gic_reserve_msi_range, gic_v3_reserve_msi_range),
	DEVMETHOD(gic_alloc_msi,	gic_v3_gic_alloc_msi),
	DEVMETHOD(gic_release_msi,	gic_v3_gic_release_msi),
	DEVMETHOD(gic_alloc_msix,	gic_v3_gic_alloc_msix),
	DEVMETHOD(gic_release_msix,	gic_v3_gic_release_msix),

	/* End */
	DEVMETHOD_END
};
164
DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));

/*
 * Driver-specific definitions.
 */
MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);

/*
 * Helper functions and definitions.
 */
/* Destination registers, either Distributor or Re-Distributor */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};

/* Per-interrupt state; wraps the MI intr_irqsrc with GIC specifics. */
struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;	/* MI interrupt source */
	uint32_t		gi_irq;		/* GIC interrupt ID (INTID) */
	enum intr_polarity	gi_pol;		/* configured polarity */
	enum intr_trigger	gi_trig;	/* configured trigger mode */
#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;	/* GI_FLAG_* bits */
};

/* Helper routines starting with gic_v3_ */
static int gic_v3_dist_init(struct gic_v3_softc *);
static int gic_v3_redist_alloc(struct gic_v3_softc *);
static int gic_v3_redist_find(struct gic_v3_softc *);
static int gic_v3_redist_init(struct gic_v3_softc *);
static int gic_v3_cpu_init(struct gic_v3_softc *);
static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);

/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/* Primary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};

#ifdef SMP
/* Secondary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};
#endif
221
222 uint32_t
223 gic_r_read_4(device_t dev, bus_size_t offset)
224 {
225 struct gic_v3_softc *sc;
226 struct resource *rdist;
227
228 sc = device_get_softc(dev);
229 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
230 return (bus_read_4(rdist, offset));
231 }
232
233 uint64_t
234 gic_r_read_8(device_t dev, bus_size_t offset)
235 {
236 struct gic_v3_softc *sc;
237 struct resource *rdist;
238
239 sc = device_get_softc(dev);
240 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
241 return (bus_read_8(rdist, offset));
242 }
243
244 void
245 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
246 {
247 struct gic_v3_softc *sc;
248 struct resource *rdist;
249
250 sc = device_get_softc(dev);
251 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
252 bus_write_4(rdist, offset, val);
253 }
254
255 void
256 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
257 {
258 struct gic_v3_softc *sc;
259 struct resource *rdist;
260
261 sc = device_get_softc(dev);
262 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
263 bus_write_8(rdist, offset, val);
264 }
265
266 static void
267 gic_v3_reserve_msi_range(device_t dev, u_int start, u_int count)
268 {
269 struct gic_v3_softc *sc;
270 int i;
271
272 sc = device_get_softc(dev);
273
274 KASSERT((start + count) < sc->gic_nirqs,
275 ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
276 start, count, sc->gic_nirqs));
277 for (i = 0; i < count; i++) {
278 KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
279 ("%s: MSI interrupt %d already has a handler", __func__,
280 count + i));
281 KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
282 ("%s: MSI interrupt %d already has a polarity", __func__,
283 count + i));
284 KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
285 ("%s: MSI interrupt %d already has a trigger", __func__,
286 count + i));
287 sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
288 sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
289 sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
290 }
291 }
292
293 /*
294 * Device interface.
295 */
296 int
297 gic_v3_attach(device_t dev)
298 {
299 struct gic_v3_softc *sc;
300 gic_v3_initseq_t *init_func;
301 uint32_t typer;
302 int rid;
303 int err;
304 size_t i;
305 u_int irq;
306 const char *name;
307
308 sc = device_get_softc(dev);
309 sc->gic_registered = FALSE;
310 sc->dev = dev;
311 err = 0;
312
313 /* Initialize mutex */
314 mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
315
316 /*
317 * Allocate array of struct resource.
318 * One entry for Distributor and all remaining for Re-Distributor.
319 */
320 sc->gic_res = malloc(
321 sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
322 M_GIC_V3, M_WAITOK);
323
324 /* Now allocate corresponding resources */
325 for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
326 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
327 &rid, RF_ACTIVE);
328 if (sc->gic_res[rid] == NULL)
329 return (ENXIO);
330 }
331
332 /*
333 * Distributor interface
334 */
335 sc->gic_dist = sc->gic_res[0];
336
337 /*
338 * Re-Dristributor interface
339 */
340 /* Allocate space under region descriptions */
341 sc->gic_redists.regions = malloc(
342 sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
343 M_GIC_V3, M_WAITOK);
344
345 /* Fill-up bus_space information for each region. */
346 for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
347 sc->gic_redists.regions[i] = sc->gic_res[rid];
348
349 /* Get the number of supported SPI interrupts */
350 typer = gic_d_read(sc, 4, GICD_TYPER);
351 sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
352 if (sc->gic_nirqs > GIC_I_NUM_MAX)
353 sc->gic_nirqs = GIC_I_NUM_MAX;
354
355 sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
356 M_GIC_V3, M_WAITOK | M_ZERO);
357 name = device_get_nameunit(dev);
358 for (irq = 0; irq < sc->gic_nirqs; irq++) {
359 struct intr_irqsrc *isrc;
360
361 sc->gic_irqs[irq].gi_irq = irq;
362 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
363 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
364
365 isrc = &sc->gic_irqs[irq].gi_isrc;
366 if (irq <= GIC_LAST_SGI) {
367 err = intr_isrc_register(isrc, sc->dev,
368 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
369 } else if (irq <= GIC_LAST_PPI) {
370 err = intr_isrc_register(isrc, sc->dev,
371 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
372 } else {
373 err = intr_isrc_register(isrc, sc->dev, 0,
374 "%s,s%u", name, irq - GIC_FIRST_SPI);
375 }
376 if (err != 0) {
377 /* XXX call intr_isrc_deregister() */
378 free(sc->gic_irqs, M_DEVBUF);
379 return (err);
380 }
381 }
382
383 mtx_init(&sc->gic_mbi_mtx, "GICv3 mbi lock", NULL, MTX_DEF);
384 if (sc->gic_mbi_start > 0) {
385 gic_v3_reserve_msi_range(dev, sc->gic_mbi_start,
386 sc->gic_mbi_end - sc->gic_mbi_start);
387
388 if (bootverbose) {
389 device_printf(dev, "using spi %u to %u\n", sc->gic_mbi_start,
390 sc->gic_mbi_end);
391 }
392 }
393
394 /*
395 * Read the Peripheral ID2 register. This is an implementation
396 * defined register, but seems to be implemented in all GICv3
397 * parts and Linux expects it to be there.
398 */
399 sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
400
401 /* Get the number of supported interrupt identifier bits */
402 sc->gic_idbits = GICD_TYPER_IDBITS(typer);
403
404 if (bootverbose) {
405 device_printf(dev, "SPIs: %u, IDs: %u\n",
406 sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
407 }
408
409 /* Train init sequence for boot CPU */
410 for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
411 err = (*init_func)(sc);
412 if (err != 0)
413 return (err);
414 }
415
416 return (0);
417 }
418
/*
 * Detach: release bus resources and free the allocations made in
 * gic_v3_attach().  Detaching a GIC that has been registered as the
 * root PIC is unsupported and panics.
 */
int
gic_v3_detach(device_t dev)
{
	struct gic_v3_softc *sc;
	size_t i;
	int rid;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		/*
		 * XXX: We should probably deregister PIC
		 */
		if (sc->gic_registered)
			panic("Trying to detach registered PIC");
	}
	/* Distributor is rid 0, Re-Distributor regions follow. */
	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);

	/* Per-CPU Re-Distributor bookkeeping; free(NULL) is a no-op. */
	for (i = 0; i <= mp_maxid; i++)
		free(sc->gic_redists.pcpu[i], M_GIC_V3);

	free(sc->ranges, M_GIC_V3);
	free(sc->gic_res, M_GIC_V3);
	free(sc->gic_redists.regions, M_GIC_V3);

	/* NOTE(review): gic_irqs and the two mutexes are not torn down
	 * here — presumably acceptable because detach of a live root
	 * PIC panics above; confirm before reusing this path. */
	return (0);
}
447
448 static int
449 gic_v3_print_child(device_t bus, device_t child)
450 {
451 struct resource_list *rl;
452 int retval = 0;
453
454 rl = BUS_GET_RESOURCE_LIST(bus, child);
455 KASSERT(rl != NULL, ("%s: No resource list", __func__));
456 retval += bus_print_child_header(bus, child);
457 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
458 retval += bus_print_child_footer(bus, child);
459
460 return (retval);
461 }
462
463 static int
464 gic_v3_get_domain(device_t dev, device_t child, int *domain)
465 {
466 struct gic_v3_devinfo *di;
467
468 di = device_get_ivars(child);
469 if (di->gic_domain < 0)
470 return (ENOENT);
471
472 *domain = di->gic_domain;
473 return (0);
474 }
475
/*
 * Instance-variable accessor for children (ITS, vgic).  Exposes the
 * usable IRQ budget, the caller CPU's Re-Distributor, the detected
 * hardware revision, the bus type, and the vgic flag.
 */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;
	struct gic_v3_devinfo *di;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		/* Split the non-GIC IRQ space evenly among children. */
		*result = (intr_nirq - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST:
		/* Re-Distributor of the CPU this is read on. */
		*result = (uintptr_t)sc->gic_redists.pcpu[PCPU_GET(cpuid)];
		return (0);
	case GIC_IVAR_HW_REV:
		/* Architecture revision comes from the PIDR2 ID register. */
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	case GIC_IVAR_VGIC:
		di = device_get_ivars(child);
		if (di == NULL)
			return (EINVAL);
		*result = di->is_vgic;
		return (0);
	}

	return (ENOENT);
}
516
517 static int
518 gic_v3_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
519 {
520 switch(which) {
521 case GICV3_IVAR_NIRQS:
522 case GICV3_IVAR_REDIST:
523 case GIC_IVAR_HW_REV:
524 case GIC_IVAR_BUS:
525 return (EINVAL);
526 }
527
528 return (ENOENT);
529 }
530
/*
 * Allocate a memory resource for a child, translating child bus
 * addresses to host addresses through the 'ranges' table when one
 * was provided (FDT "ranges" style remapping).
 */
static struct resource *
gic_v3_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct gic_v3_softc *sc;
	struct resource_list_entry *rle;
	struct resource_list *rl;
	int j;

	/* We only allocate memory */
	if (type != SYS_RES_MEMORY)
		return (NULL);

	sc = device_get_softc(bus);

	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		/* Caller gave no explicit window; look up the child's
		 * resource list entry for this rid instead. */
		rl = BUS_GET_RESOURCE_LIST(bus, child);
		if (rl == NULL)
			return (NULL);

		/* Find defaults for this rid */
		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL)
			return (NULL);

		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	/* Remap through ranges property */
	for (j = 0; j < sc->nranges; j++) {
		if (start >= sc->ranges[j].bus && end <
		    sc->ranges[j].bus + sc->ranges[j].size) {
			/* Rebase [start,end] from bus space to host space. */
			start -= sc->ranges[j].bus;
			start += sc->ranges[j].host;
			end -= sc->ranges[j].bus;
			end += sc->ranges[j].host;
			break;
		}
	}
	/* With a non-empty ranges table, an unmatched window is an error. */
	if (j == sc->nranges && sc->nranges != 0) {
		if (bootverbose)
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);

		return (NULL);
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}
583
/*
 * Top-level interrupt filter.  Loops acknowledging interrupts from
 * ICC_IAR1_EL1 and dispatching them until a spurious/out-of-range ID
 * is returned.  LPIs are forwarded to the child PIC (ITS); SGIs are
 * dispatched as IPIs; PPIs/SPIs go through the MI dispatch path.
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		/* LPIs belong to the child controller (ITS). */
		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/* Spurious ID (e.g. 1023) or out of range: we are done. */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			/* Edge interrupts are EOId before dispatch; level
			 * ones are EOId in post_filter/pre_ithread. */
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				/* No handler claimed it: EOI (if not done
				 * already) and mask the stray source. */
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}
647
#ifdef FDT
/*
 * Decode an FDT interrupt specifier (3+ cells) into a GIC INTID plus
 * polarity/trigger.  Returns EINVAL on malformed or unsupported
 * specifiers.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;

	if (ncells < 3)
		return (EINVAL);

	/*
	 * The 1st cell is the interrupt type:
	 *	0 = SPI
	 *	1 = PPI
	 * The 2nd cell contains the interrupt number:
	 *	[0 - 987] for SPI
	 *	[0 - 15] for PPI
	 * The 3rd cell is the flags, encoded as follows:
	 *   bits[3:0] trigger type and level flags
	 *	1 = edge triggered
	 *	2 = edge triggered (PPI only)
	 *	4 = level-sensitive
	 *	8 = level-sensitive (PPI only)
	 */
	switch (cells[0]) {
	case 0:
		irq = GIC_FIRST_SPI + cells[1];
		/* SPI irq is checked later. */
		break;
	case 1:
		irq = GIC_FIRST_PPI + cells[1];
		if (irq > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
		break;
	default:
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	/* Translate the flags cell into MI polarity/trigger values. */
	switch (cells[2] & FDT_INTR_MASK) {
	case FDT_INTR_EDGE_RISING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_EDGE_FALLING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_LOW;
		break;
	case FDT_INTR_LEVEL_HIGH:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_LEVEL_LOW:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_LOW;
		break;
	default:
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", cells[2]);
		return (EINVAL);
	}

	/* Check the interrupt is valid: SPIs must be active-high. */
	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
		return (EINVAL);

	*irqp = irq;
	return (0);
}
#endif
722
723 static int
724 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
725 enum intr_polarity *polp, enum intr_trigger *trigp)
726 {
727 struct gic_v3_irqsrc *gi;
728
729 /* SPI-mapped MSI */
730 gi = (struct gic_v3_irqsrc *)msi_data->isrc;
731 if (gi == NULL)
732 return (ENXIO);
733
734 *irqp = gi->gi_irq;
735
736 /* MSI/MSI-X interrupts are always edge triggered with high polarity */
737 *polp = INTR_POLARITY_HIGH;
738 *trigp = INTR_TRIGGER_EDGE;
739
740 return (0);
741 }
742
/*
 * Common interrupt-mapping worker: decode FDT, ACPI or MSI map data
 * into an INTID plus polarity/trigger, then validate the results.
 * polp/trigp may be NULL when the caller only needs the IRQ number.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		/* ACPI map data carries the values directly. */
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/* Reject IRQs outside this GIC and nonsensical configurations. */
	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
814
815 static int
816 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
817 struct intr_irqsrc **isrcp)
818 {
819 struct gic_v3_softc *sc;
820 int error;
821 u_int irq;
822
823 error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
824 if (error == 0) {
825 sc = device_get_softc(dev);
826 *isrcp = GIC_INTR_ISRC(sc, irq);
827 }
828 return (error);
829 }
830
/*
 * PIC_SETUP_INTR method: record/validate the polarity and trigger for
 * an interrupt source and program GICD_ICFGR/GICR ICFGR accordingly.
 * Subsequent setups of the same source must request an identical
 * configuration.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;
	uint32_t reg;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* The map data must target this source with a concrete config. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		gi->gi_pol = pol;
		gi->gi_trig = trig;
	}

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		mtx_lock_spin(&sc->gic_mtx);

		/* Set the trigger and polarity */
		/* PPIs: the config register lives in the Re-Distributor's
		 * SGI frame; SPIs: in the Distributor. */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/* Two config bits per IRQ, 16 IRQs per register; bit 1 of
		 * the pair selects edge (set) vs. level (clear). */
		if (trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}

		mtx_unlock_spin(&sc->gic_mtx);

		/* Route SPIs to a CPU (no-op range check inside for PPIs). */
		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
908
909 static int
910 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
911 struct resource *res, struct intr_map_data *data)
912 {
913 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
914
915 if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
916 gi->gi_pol = INTR_POLARITY_CONFORM;
917 gi->gi_trig = INTR_TRIGGER_CONFORM;
918 }
919
920 return (0);
921 }
922
923 static void
924 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
925 {
926 struct gic_v3_softc *sc;
927 struct gic_v3_irqsrc *gi;
928 u_int irq;
929
930 sc = device_get_softc(dev);
931 gi = (struct gic_v3_irqsrc *)isrc;
932 irq = gi->gi_irq;
933
934 if (irq <= GIC_LAST_PPI) {
935 /* SGIs and PPIs in corresponding Re-Distributor */
936 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
937 GICD_I_MASK(irq));
938 gic_v3_wait_for_rwp(sc, REDIST);
939 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
940 /* SPIs in distributor */
941 gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
942 gic_v3_wait_for_rwp(sc, DIST);
943 } else
944 panic("%s: Unsupported IRQ %u", __func__, irq);
945 }
946
947 static void
948 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
949 {
950 struct gic_v3_softc *sc;
951 struct gic_v3_irqsrc *gi;
952 u_int irq;
953
954 sc = device_get_softc(dev);
955 gi = (struct gic_v3_irqsrc *)isrc;
956 irq = gi->gi_irq;
957
958 if (irq <= GIC_LAST_PPI) {
959 /* SGIs and PPIs in corresponding Re-Distributor */
960 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
961 GICD_I_MASK(irq));
962 gic_v3_wait_for_rwp(sc, REDIST);
963 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
964 /* SPIs in distributor */
965 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
966 gic_v3_wait_for_rwp(sc, DIST);
967 } else
968 panic("%s: Unsupported IRQ %u", __func__, irq);
969 }
970
/*
 * Called before scheduling the ithread for a level interrupt: mask the
 * source first, then EOI, so the level-asserted line cannot re-fire
 * while the ithread runs.  Order matters here.
 */
static void
gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

	gic_v3_disable_intr(dev, isrc);
	gic_icc_write(EOIR1, gi->gi_irq);
}
979
/*
 * Called after the ithread has run: re-enable the source that was
 * masked in gic_v3_pre_ithread().
 */
static void
gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	gic_v3_enable_intr(dev, isrc);
}
986
987 static void
988 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
989 {
990 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
991
992 if (gi->gi_trig == INTR_TRIGGER_EDGE)
993 return;
994
995 gic_icc_write(EOIR1, gi->gi_irq);
996 }
997
/*
 * Route an SPI to a CPU via GICD_IROUTER.  With no explicit CPU set,
 * pick the next CPU round-robin; otherwise use the first CPU in the
 * requested set (the GIC routes an SPI to a single target).
 * SGIs/PPIs are inherently per-CPU and cannot be bound.
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;
	if (gi->gi_irq <= GIC_LAST_PPI)
		return (EINVAL);

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		/* Round-robin across all CPUs; remember the last choice. */
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
1030
1031 #ifdef SMP
/*
 * Per-CPU (AP) bring-up: run the secondary init sequence for this
 * CPU's Re-Distributor and CPU interface, unmask any SGI/PPI sources
 * that already have handlers, then let child PICs (ITS) initialize.
 */
static void
gic_v3_init_secondary(device_t dev)
{
	device_t child;
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	struct intr_irqsrc *isrc;
	u_int cpu, irq;
	int err, i;

	sc = device_get_softc(dev);
	cpu = PCPU_GET(cpuid);

	/* Train init sequence for boot CPU */
	for (init_func = gic_v3_secondary_init; *init_func != NULL;
	    init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n", cpu);
			return;
		}
	}

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Propagate secondary init to child interrupt controllers. */
	for (i = 0; i < sc->gic_nchildren; i++) {
		child = sc->gic_children[i];
		PIC_INIT_SECONDARY(child);
	}
}
1075
/*
 * Send an IPI (SGI) to a set of CPUs via ICC_SGI1R_EL1.  One SGI1R
 * write can target up to 16 CPUs sharing the same Aff3.Aff2.Aff1
 * affinity prefix (target list is a 16-bit Aff0 mask), so targets
 * are batched by affinity group and flushed on each group change.
 * Relies on CPU_AFFINITY() being ordered so that CPUs in the same
 * group are contiguous — TODO confirm for exotic topologies.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Send the IPI */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
1123
/*
 * Allocate the next free SGI for the given IPI number and record the
 * SGI -> IPI mapping used by arm_gic_v3_intr().  SGIs are handed out
 * in order; ENOSPC once all 16 are taken.  Runs during single-threaded
 * boot — sgi_first_unused is not locked.
 */
static int
gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct intr_irqsrc *isrc;
	struct gic_v3_softc *sc = device_get_softc(dev);

	if (sgi_first_unused > GIC_LAST_SGI)
		return (ENOSPC);

	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
	sgi_to_ipi[sgi_first_unused++] = ipi;

	/* Mark the source as active on the bootstrap CPU. */
	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	*isrcp = isrc;
	return (0);
}
1141 #endif /* SMP */
1142
1143 /*
1144 * Helper routines
1145 */
/*
 * Poll the selected (re-)distributor until its register-write-pending
 * bit clears, i.e. until previously issued register writes have taken
 * effect.  Panics if the bit is still set after roughly one second.
 */
static void
gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
{
	struct resource *res;
	u_int cpuid;
	size_t us_left = 1000000;	/* ~1 s timeout, polled in 1 us steps */

	cpuid = PCPU_GET(cpuid);

	switch (xdist) {
	case DIST:
		res = sc->gic_dist;
		break;
	case REDIST:
		/* This CPU's Re-Distributor frame. */
		res = &sc->gic_redists.pcpu[cpuid]->res;
		break;
	default:
		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
		return;
	}

	/*
	 * NOTE(review): GICD_CTLR/GICD_CTLR_RWP are used for the REDIST
	 * case as well.  GICR_CTLR shares offset 0, but its RWP bit
	 * differs from GICD_CTLR's — confirm this is intentional.
	 */
	while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
		DELAY(1);
		if (us_left-- == 0)
			panic("GICD Register write pending for too long");
	}
}
1173
/* CPU interface. */
static __inline void
gic_v3_cpu_priority(uint64_t mask)
{

	/* Set the priority mask; interrupts below this priority are masked. */
	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
}
1182
1183 static int
1184 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
1185 {
1186 uint64_t sre;
1187 u_int cpuid;
1188
1189 cpuid = PCPU_GET(cpuid);
1190 /*
1191 * Set the SRE bit to enable access to GIC CPU interface
1192 * via system registers.
1193 */
1194 sre = READ_SPECIALREG(icc_sre_el1);
1195 sre |= ICC_SRE_EL1_SRE;
1196 WRITE_SPECIALREG(icc_sre_el1, sre);
1197 isb();
1198 /*
1199 * Now ensure that the bit is set.
1200 */
1201 sre = READ_SPECIALREG(icc_sre_el1);
1202 if ((sre & ICC_SRE_EL1_SRE) == 0) {
1203 /* We are done. This was disabled in EL2 */
1204 device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
1205 "via system registers\n", cpuid);
1206 return (ENXIO);
1207 } else if (bootverbose) {
1208 device_printf(sc->dev,
1209 "CPU%u enabled CPU interface via system registers\n",
1210 cpuid);
1211 }
1212
1213 return (0);
1214 }
1215
/*
 * Per-CPU interface initialization: enable system-register access,
 * open the priority mask, and enable Group 1 interrupt delivery.
 */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable group 1 (non-secure) interrupts.
	 * NOTE(review): the ICC_IGRPEN0_EL1_EN macro is written to
	 * IGRPEN1; the enable bit is bit 0 in both registers so the
	 * value is correct, but a dedicated IGRPEN1 macro would be
	 * clearer — confirm and rename.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
1234
/* Distributor */
/*
 * One-time Distributor initialization, run by the boot CPU: disable
 * the Distributor, configure all SPIs, re-enable it with affinity
 * routing, then route every SPI to the boot CPU.  The ordering of
 * these register writes is architecturally required.
 */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure (32 interrupts per reg) */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/* Set all global interrupts to be level triggered, active low. */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 8, GICD_IROUTER(i), aff);

	return (0);
}
1291
1292 /* Re-Distributor */
1293 static int
1294 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1295 {
1296 u_int cpuid;
1297
1298 /* Allocate struct resource for all CPU's Re-Distributor registers */
1299 for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
1300 if (CPU_ISSET(cpuid, &all_cpus) != 0)
1301 sc->gic_redists.pcpu[cpuid] =
1302 malloc(sizeof(*sc->gic_redists.pcpu[0]),
1303 M_GIC_V3, M_WAITOK);
1304 else
1305 sc->gic_redists.pcpu[cpuid] = NULL;
1306 return (0);
1307 }
1308
/*
 * Locate the calling CPU's Re-Distributor by walking every
 * Re-Distributor region and matching GICR_TYPER's affinity field
 * against this CPU's MPIDR affinity.  On success the matching frame's
 * resource is copied into sc->gic_redists.pcpu[cpuid] and 0 is
 * returned; otherwise ENODEV (region is not GICv3/v4) or ENXIO (no
 * matching frame anywhere).
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource r_res;
	bus_space_handle_t r_bsh;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = *sc->gic_redists.regions[i];
		r_bsh = rman_get_bushandle(&r_res);

		/* Only GICv3/GICv4 Re-Distributors are supported. */
		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Walk the frames in this region until GICR_TYPER.Last. */
		do {
			typer = bus_read_8(&r_res, GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				sc->gic_redists.pcpu[cpuid]->res = r_res;
				sc->gic_redists.pcpu[cpuid]->lpi_enabled = false;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/* Advance to the next frame; VLPI frames are larger. */
			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				r_bsh +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}

			rman_set_bushandle(&r_res, r_bsh);
		} while ((typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1377
1378 static int
1379 gic_v3_redist_wake(struct gic_v3_softc *sc)
1380 {
1381 uint32_t waker;
1382 size_t us_left = 1000000;
1383
1384 waker = gic_r_read(sc, 4, GICR_WAKER);
1385 /* Wake up Re-Distributor for this CPU */
1386 waker &= ~GICR_WAKER_PS;
1387 gic_r_write(sc, 4, GICR_WAKER, waker);
1388 /*
1389 * When clearing ProcessorSleep bit it is required to wait for
1390 * ChildrenAsleep to become zero following the processor power-on.
1391 */
1392 while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1393 DELAY(1);
1394 if (us_left-- == 0) {
1395 panic("Could not wake Re-Distributor for CPU%u",
1396 PCPU_GET(cpuid));
1397 }
1398 }
1399
1400 if (bootverbose) {
1401 device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1402 PCPU_GET(cpuid));
1403 }
1404
1405 return (0);
1406 }
1407
/*
 * Per-CPU Re-Distributor initialization: locate and wake this CPU's
 * Re-Distributor, then configure its SGIs and PPIs.
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs (the written mask covers the PPI bits only) */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/* Set priority for SGIs and PPIs */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
1443
1444 /*
1445 * SPI-mapped Message Based Interrupts -- a GICv3 MSI/MSI-X controller.
1446 */
1447
/*
 * Allocate `count` contiguous MSI interrupts from the MBI range
 * [mbi_start, mbi_start + mbi_count), starting on a boundary aligned
 * to `maxcount`.  Both count and maxcount must be powers of two.  On
 * success the irqsrc pointers are stored in isrc[0..count-1]; returns
 * ENXIO when no suitable free range exists.
 */
static int
gic_v3_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count,
    int count, int maxcount, struct intr_irqsrc **isrc)
{
	struct gic_v3_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	sc = device_get_softc(dev);

	/* The MBI bitmap is protected by the softc's MBI mutex. */
	mtx_lock(&sc->gic_mbi_mtx);

	found = false;
	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == mbi_start + mbi_count) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == mbi_start + mbi_count) {
		mtx_unlock(&sc->gic_mbi_mtx);
		return (ENXIO);
	}

	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->gic_mbi_mtx);

	/* Hand the claimed range back to the caller (no lock needed). */
	for (i = 0; i < count; i++)
		isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];

	return (0);
}
1511
1512 static int
1513 gic_v3_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
1514 {
1515 struct gic_v3_softc *sc;
1516 struct gic_v3_irqsrc *gi;
1517 int i;
1518
1519 sc = device_get_softc(dev);
1520
1521 mtx_lock(&sc->gic_mbi_mtx);
1522 for (i = 0; i < count; i++) {
1523 gi = (struct gic_v3_irqsrc *)isrc[i];
1524
1525 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1526 ("%s: Trying to release an unused MSI-X interrupt",
1527 __func__));
1528
1529 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1530 }
1531 mtx_unlock(&sc->gic_mbi_mtx);
1532
1533 return (0);
1534 }
1535
1536 static int
1537 gic_v3_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
1538 struct intr_irqsrc **isrcp)
1539 {
1540 struct gic_v3_softc *sc;
1541 int irq;
1542
1543 sc = device_get_softc(dev);
1544
1545 mtx_lock(&sc->gic_mbi_mtx);
1546 /* Find an unused interrupt */
1547 for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1548 KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1549 ("%s: Non-MSI interrupt found", __func__));
1550 if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1551 break;
1552 }
1553 /* No free interrupt was found */
1554 if (irq == mbi_start + mbi_count) {
1555 mtx_unlock(&sc->gic_mbi_mtx);
1556 return (ENXIO);
1557 }
1558
1559 /* Mark the interrupt as used */
1560 sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1561 mtx_unlock(&sc->gic_mbi_mtx);
1562
1563 *isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1564
1565 return (0);
1566 }
1567
1568 static int
1569 gic_v3_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
1570 {
1571 struct gic_v3_softc *sc;
1572 struct gic_v3_irqsrc *gi;
1573
1574 sc = device_get_softc(dev);
1575 gi = (struct gic_v3_irqsrc *)isrc;
1576
1577 KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1578 ("%s: Trying to release an unused MSI-X interrupt", __func__));
1579
1580 mtx_lock(&sc->gic_mbi_mtx);
1581 gi->gi_flags &= ~GI_FLAG_MSI_USED;
1582 mtx_unlock(&sc->gic_mbi_mtx);
1583
1584 return (0);
1585 }
1586
1587 static int
1588 gic_v3_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1589 device_t *pic, struct intr_irqsrc **isrc)
1590 {
1591 struct gic_v3_softc *sc;
1592 int error;
1593
1594 sc = device_get_softc(dev);
1595 error = gic_v3_gic_alloc_msi(dev, sc->gic_mbi_start,
1596 sc->gic_mbi_end - sc->gic_mbi_start, count, maxcount, isrc);
1597 if (error != 0)
1598 return (error);
1599
1600 *pic = dev;
1601 return (0);
1602 }
1603
1604 static int
1605 gic_v3_release_msi(device_t dev, device_t child, int count,
1606 struct intr_irqsrc **isrc)
1607 {
1608 return (gic_v3_gic_release_msi(dev, count, isrc));
1609 }
1610
1611 static int
1612 gic_v3_alloc_msix(device_t dev, device_t child, device_t *pic,
1613 struct intr_irqsrc **isrc)
1614 {
1615 struct gic_v3_softc *sc;
1616 int error;
1617
1618 sc = device_get_softc(dev);
1619 error = gic_v3_gic_alloc_msix(dev, sc->gic_mbi_start,
1620 sc->gic_mbi_end - sc->gic_mbi_start, isrc);
1621 if (error != 0)
1622 return (error);
1623
1624 *pic = dev;
1625
1626 return (0);
1627 }
1628
1629 static int
1630 gic_v3_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1631 {
1632 return (gic_v3_gic_release_msix(dev, isrc));
1633 }
1634
1635 static int
1636 gic_v3_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1637 uint64_t *addr, uint32_t *data)
1638 {
1639 struct gic_v3_softc *sc = device_get_softc(dev);
1640 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1641
1642 *addr = vtophys(rman_get_virtual(sc->gic_dist)) + GICD_SETSPI_NSR;
1643 *data = gi->gi_irq;
1644
1645 return (0);
1646 }
Cache object: 6d8c02d1bd5623083bf65c31d386c6d0
|