1 /*-
2 * Copyright (c) 2015-2016 The FreeBSD Foundation
3 * All rights reserved.
4 *
5 * This software was developed by Andrew Turner under
6 * the sponsorship of the FreeBSD Foundation.
7 *
8 * This software was developed by Semihalf under
9 * the sponsorship of the FreeBSD Foundation.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include "opt_acpi.h"
34 #include "opt_platform.h"
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bitstring.h>
42 #include <sys/bus.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/malloc.h>
46 #include <sys/module.h>
47 #include <sys/rman.h>
48 #include <sys/pcpu.h>
49 #include <sys/proc.h>
50 #include <sys/cpuset.h>
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/smp.h>
54
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/intr.h>
61
62 #ifdef FDT
63 #include <dev/fdt/fdt_intr.h>
64 #include <dev/ofw/ofw_bus_subr.h>
65 #endif
66
67 #ifdef DEV_ACPI
68 #include <contrib/dev/acpica/include/acpi.h>
69 #include <dev/acpica/acpivar.h>
70 #endif
71
72 #include "pic_if.h"
73
74 #include <arm/arm/gic_common.h>
75 #include "gic_v3_reg.h"
76 #include "gic_v3_var.h"
77
78 static bus_get_domain_t gic_v3_get_domain;
79 static bus_read_ivar_t gic_v3_read_ivar;
80
81 static pic_disable_intr_t gic_v3_disable_intr;
82 static pic_enable_intr_t gic_v3_enable_intr;
83 static pic_map_intr_t gic_v3_map_intr;
84 static pic_setup_intr_t gic_v3_setup_intr;
85 static pic_teardown_intr_t gic_v3_teardown_intr;
86 static pic_post_filter_t gic_v3_post_filter;
87 static pic_post_ithread_t gic_v3_post_ithread;
88 static pic_pre_ithread_t gic_v3_pre_ithread;
89 static pic_bind_intr_t gic_v3_bind_intr;
90 #ifdef SMP
91 static pic_init_secondary_t gic_v3_init_secondary;
92 static pic_ipi_send_t gic_v3_ipi_send;
93 static pic_ipi_setup_t gic_v3_ipi_setup;
94 #endif
95
96 static u_int gic_irq_cpu;
97 #ifdef SMP
98 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
99 static u_int sgi_first_unused = GIC_FIRST_SGI;
100 #endif
101
/*
 * Method table for the abstract "gic" class.  Note there is no
 * device_probe/device_attach here: bus-specific front ends (FDT/ACPI)
 * subclass this driver and call gic_v3_attach() themselves.
 */
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),

	/* Interrupt controller interface (INTRNG PIC methods) */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* End */
	DEVMETHOD_END
};
129
130 DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
131 sizeof(struct gic_v3_softc));
132
133 /*
134 * Driver-specific definitions.
135 */
136 MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
137
138 /*
139 * Helper functions and definitions.
140 */
/* Destination registers, either Distributor or Re-Distributor */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};

/* Per-interrupt state; one entry per INTID in softc->gic_irqs. */
struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;	/* generic isrc; must be first, the
						 * driver casts between the two */
	uint32_t		gi_irq;		/* GIC interrupt ID (SGI/PPI/SPI) */
	enum intr_polarity	gi_pol;		/* configured polarity, or CONFORM */
	enum intr_trigger	gi_trig;	/* configured trigger, or CONFORM */
};
153
/* Helper routines starting with gic_v3_ */
static int gic_v3_dist_init(struct gic_v3_softc *);
static int gic_v3_redist_alloc(struct gic_v3_softc *);
static int gic_v3_redist_find(struct gic_v3_softc *);
static int gic_v3_redist_init(struct gic_v3_softc *);
static int gic_v3_cpu_init(struct gic_v3_softc *);
static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);

/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/*
 * Primary CPU initialization sequence; run in order, stopping at the
 * first failure.  NULL terminates the list.
 */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};

#ifdef SMP
/*
 * Secondary CPU initialization sequence.  The Distributor is global and
 * the per-CPU descriptors are already allocated, so only the local
 * Re-Distributor and CPU interface need setting up.
 */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};
#endif
181
182 uint32_t
183 gic_r_read_4(device_t dev, bus_size_t offset)
184 {
185 struct gic_v3_softc *sc;
186 struct resource *rdist;
187
188 sc = device_get_softc(dev);
189 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
190 return (bus_read_4(rdist, offset));
191 }
192
193 uint64_t
194 gic_r_read_8(device_t dev, bus_size_t offset)
195 {
196 struct gic_v3_softc *sc;
197 struct resource *rdist;
198
199 sc = device_get_softc(dev);
200 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
201 return (bus_read_8(rdist, offset));
202 }
203
204 void
205 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
206 {
207 struct gic_v3_softc *sc;
208 struct resource *rdist;
209
210 sc = device_get_softc(dev);
211 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
212 bus_write_4(rdist, offset, val);
213 }
214
215 void
216 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
217 {
218 struct gic_v3_softc *sc;
219 struct resource *rdist;
220
221 sc = device_get_softc(dev);
222 rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
223 bus_write_8(rdist, offset, val);
224 }
225
226 /*
227 * Device interface.
228 */
229 int
230 gic_v3_attach(device_t dev)
231 {
232 struct gic_v3_softc *sc;
233 gic_v3_initseq_t *init_func;
234 uint32_t typer;
235 int rid;
236 int err;
237 size_t i;
238 u_int irq;
239 const char *name;
240
241 sc = device_get_softc(dev);
242 sc->gic_registered = FALSE;
243 sc->dev = dev;
244 err = 0;
245
246 /* Initialize mutex */
247 mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
248
249 /*
250 * Allocate array of struct resource.
251 * One entry for Distributor and all remaining for Re-Distributor.
252 */
253 sc->gic_res = malloc(
254 sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
255 M_GIC_V3, M_WAITOK);
256
257 /* Now allocate corresponding resources */
258 for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
259 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
260 &rid, RF_ACTIVE);
261 if (sc->gic_res[rid] == NULL)
262 return (ENXIO);
263 }
264
265 /*
266 * Distributor interface
267 */
268 sc->gic_dist = sc->gic_res[0];
269
270 /*
271 * Re-Dristributor interface
272 */
273 /* Allocate space under region descriptions */
274 sc->gic_redists.regions = malloc(
275 sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
276 M_GIC_V3, M_WAITOK);
277
278 /* Fill-up bus_space information for each region. */
279 for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
280 sc->gic_redists.regions[i] = sc->gic_res[rid];
281
282 /* Get the number of supported SPI interrupts */
283 typer = gic_d_read(sc, 4, GICD_TYPER);
284 sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
285 if (sc->gic_nirqs > GIC_I_NUM_MAX)
286 sc->gic_nirqs = GIC_I_NUM_MAX;
287
288 sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
289 M_GIC_V3, M_WAITOK | M_ZERO);
290 name = device_get_nameunit(dev);
291 for (irq = 0; irq < sc->gic_nirqs; irq++) {
292 struct intr_irqsrc *isrc;
293
294 sc->gic_irqs[irq].gi_irq = irq;
295 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
296 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
297
298 isrc = &sc->gic_irqs[irq].gi_isrc;
299 if (irq <= GIC_LAST_SGI) {
300 err = intr_isrc_register(isrc, sc->dev,
301 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
302 } else if (irq <= GIC_LAST_PPI) {
303 err = intr_isrc_register(isrc, sc->dev,
304 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
305 } else {
306 err = intr_isrc_register(isrc, sc->dev, 0,
307 "%s,s%u", name, irq - GIC_FIRST_SPI);
308 }
309 if (err != 0) {
310 /* XXX call intr_isrc_deregister() */
311 free(sc->gic_irqs, M_DEVBUF);
312 return (err);
313 }
314 }
315
316 /*
317 * Read the Peripheral ID2 register. This is an implementation
318 * defined register, but seems to be implemented in all GICv3
319 * parts and Linux expects it to be there.
320 */
321 sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
322
323 /* Get the number of supported interrupt identifier bits */
324 sc->gic_idbits = GICD_TYPER_IDBITS(typer);
325
326 if (bootverbose) {
327 device_printf(dev, "SPIs: %u, IDs: %u\n",
328 sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
329 }
330
331 /* Train init sequence for boot CPU */
332 for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
333 err = (*init_func)(sc);
334 if (err != 0)
335 return (err);
336 }
337
338 return (0);
339 }
340
341 int
342 gic_v3_detach(device_t dev)
343 {
344 struct gic_v3_softc *sc;
345 size_t i;
346 int rid;
347
348 sc = device_get_softc(dev);
349
350 if (device_is_attached(dev)) {
351 /*
352 * XXX: We should probably deregister PIC
353 */
354 if (sc->gic_registered)
355 panic("Trying to detach registered PIC");
356 }
357 for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
358 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
359
360 for (i = 0; i <= mp_maxid; i++)
361 free(sc->gic_redists.pcpu[i], M_GIC_V3);
362
363 free(sc->gic_res, M_GIC_V3);
364 free(sc->gic_redists.regions, M_GIC_V3);
365
366 return (0);
367 }
368
369 static int
370 gic_v3_get_domain(device_t dev, device_t child, int *domain)
371 {
372 struct gic_v3_devinfo *di;
373
374 di = device_get_ivars(child);
375 if (di->gic_domain < 0)
376 return (ENOENT);
377
378 *domain = di->gic_domain;
379 return (0);
380 }
381
/*
 * bus_read_ivar method: export driver state to child devices (e.g. the
 * ITS).  Returns ENOENT for unknown ivars.
 */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		/*
		 * Share the INTIDs above our own range evenly among the
		 * children (NIRQ is the global interrupt-number space).
		 */
		*result = (NIRQ - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST_VADDR:
		/* KVA of the current CPU's Re-Distributor frame. */
		*result = (uintptr_t)rman_get_virtual(
		    &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res);
		return (0);
	case GICV3_IVAR_REDIST:
		/* The current CPU's Re-Distributor descriptor itself. */
		*result = (uintptr_t)sc->gic_redists.pcpu[PCPU_GET(cpuid)];
		return (0);
	case GIC_IVAR_HW_REV:
		/* Architecture revision (3 or 4) as read from GICD_PIDR2. */
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		/* Which bus (FDT/ACPI) enumerated this controller. */
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	}

	return (ENOENT);
}
419
/*
 * Top-level interrupt filter.  Repeatedly acknowledges interrupts from
 * ICC_IAR1_EL1 and dispatches them until a spurious/out-of-range INTID
 * terminates the loop.
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware: Cavium ThunderX
			 * Chip revision: Pass 1.0 (early version)
			 *                Pass 1.1 (production)
			 * ERRATUM: 22978, 23154
			 * Workaround: padded IAR1 read with a trailing
			 * barrier, per the erratum's prescribed sequence.
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;		\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		/* LPIs (MSIs) are owned by a child PIC, e.g. the ITS. */
		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/*
		 * Out-of-range but below the LPI base: spurious INTID
		 * (e.g. 1023 "no pending interrupt") ends the loop.
		 */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			/* Edge interrupts are EOId up front; level ones in
			 * post_filter/pre_ithread after handling. */
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				/* No handler claimed it: EOI (if still
				 * owed) and mask the stray source. */
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}
483
484 #ifdef FDT
485 static int
486 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
487 enum intr_polarity *polp, enum intr_trigger *trigp)
488 {
489 u_int irq;
490
491 if (ncells < 3)
492 return (EINVAL);
493
494 /*
495 * The 1st cell is the interrupt type:
496 * 0 = SPI
497 * 1 = PPI
498 * The 2nd cell contains the interrupt number:
499 * [0 - 987] for SPI
500 * [0 - 15] for PPI
501 * The 3rd cell is the flags, encoded as follows:
502 * bits[3:0] trigger type and level flags
503 * 1 = edge triggered
504 * 2 = edge triggered (PPI only)
505 * 4 = level-sensitive
506 * 8 = level-sensitive (PPI only)
507 */
508 switch (cells[0]) {
509 case 0:
510 irq = GIC_FIRST_SPI + cells[1];
511 /* SPI irq is checked later. */
512 break;
513 case 1:
514 irq = GIC_FIRST_PPI + cells[1];
515 if (irq > GIC_LAST_PPI) {
516 device_printf(dev, "unsupported PPI interrupt "
517 "number %u\n", cells[1]);
518 return (EINVAL);
519 }
520 break;
521 default:
522 device_printf(dev, "unsupported interrupt type "
523 "configuration %u\n", cells[0]);
524 return (EINVAL);
525 }
526
527 switch (cells[2] & FDT_INTR_MASK) {
528 case FDT_INTR_EDGE_RISING:
529 *trigp = INTR_TRIGGER_EDGE;
530 *polp = INTR_POLARITY_HIGH;
531 break;
532 case FDT_INTR_EDGE_FALLING:
533 *trigp = INTR_TRIGGER_EDGE;
534 *polp = INTR_POLARITY_LOW;
535 break;
536 case FDT_INTR_LEVEL_HIGH:
537 *trigp = INTR_TRIGGER_LEVEL;
538 *polp = INTR_POLARITY_HIGH;
539 break;
540 case FDT_INTR_LEVEL_LOW:
541 *trigp = INTR_TRIGGER_LEVEL;
542 *polp = INTR_POLARITY_LOW;
543 break;
544 default:
545 device_printf(dev, "unsupported trigger/polarity "
546 "configuration 0x%02x\n", cells[2]);
547 return (EINVAL);
548 }
549
550 /* Check the interrupt is valid */
551 if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
552 return (EINVAL);
553
554 *irqp = irq;
555 return (0);
556 }
557 #endif
558
559 static int
560 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
561 enum intr_polarity *polp, enum intr_trigger *trigp)
562 {
563 struct gic_v3_irqsrc *gi;
564
565 /* SPI-mapped MSI */
566 gi = (struct gic_v3_irqsrc *)msi_data->isrc;
567 if (gi == NULL)
568 return (ENXIO);
569
570 *irqp = gi->gi_irq;
571
572 /* MSI/MSI-X interrupts are always edge triggered with high polarity */
573 *polp = INTR_POLARITY_HIGH;
574 *trigp = INTR_TRIGGER_EDGE;
575
576 return (0);
577 }
578
/*
 * Common mapping helper: translate bus-specific interrupt mapping data
 * (FDT cells, ACPI description, or MSI record) into an INTID plus
 * validated polarity/trigger.  polp/trigp may be NULL when the caller
 * only needs the INTID.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		/* ACPI supplies the triple directly. */
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/* The INTID must be one we manage (SGI/PPI/SPI range). */
	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	/* Reject any polarity value outside the known enumerators. */
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	/* Likewise for the trigger mode. */
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
650
651 static int
652 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
653 struct intr_irqsrc **isrcp)
654 {
655 struct gic_v3_softc *sc;
656 int error;
657 u_int irq;
658
659 error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
660 if (error == 0) {
661 sc = device_get_softc(dev);
662 *isrcp = GIC_INTR_ISRC(sc, irq);
663 }
664 return (error);
665 }
666
/*
 * pic_setup_intr method: validate the requested configuration against
 * this source and, on first setup, program the trigger mode into
 * GICD_ICFGR (SPIs) or the Re-Distributor's ICFGR (PPIs) and route the
 * interrupt.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;
	uint32_t reg;
	u_int irq;
	int error;

	/* Cannot configure without bus-provided mapping data. */
	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* Mapping must resolve to this isrc, with concrete config. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	gi->gi_pol = pol;
	gi->gi_trig = trig;

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		mtx_lock_spin(&sc->gic_mtx);

		/*
		 * Set the trigger and polarity: ICFGR holds two bits per
		 * interrupt, with bit 1 of the pair set for edge mode.
		 * PPIs live in the Re-Distributor's SGI frame, SPIs in
		 * the Distributor.
		 */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		if (trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}

		mtx_unlock_spin(&sc->gic_mtx);

		/* Route to a CPU; bind_intr rejects PPIs internally. */
		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
741
742 static int
743 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
744 struct resource *res, struct intr_map_data *data)
745 {
746 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
747
748 if (isrc->isrc_handlers == 0) {
749 gi->gi_pol = INTR_POLARITY_CONFORM;
750 gi->gi_trig = INTR_TRIGGER_CONFORM;
751 }
752
753 return (0);
754 }
755
756 static void
757 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
758 {
759 struct gic_v3_softc *sc;
760 struct gic_v3_irqsrc *gi;
761 u_int irq;
762
763 sc = device_get_softc(dev);
764 gi = (struct gic_v3_irqsrc *)isrc;
765 irq = gi->gi_irq;
766
767 if (irq <= GIC_LAST_PPI) {
768 /* SGIs and PPIs in corresponding Re-Distributor */
769 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
770 GICD_I_MASK(irq));
771 gic_v3_wait_for_rwp(sc, REDIST);
772 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
773 /* SPIs in distributor */
774 gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
775 gic_v3_wait_for_rwp(sc, DIST);
776 } else
777 panic("%s: Unsupported IRQ %u", __func__, irq);
778 }
779
780 static void
781 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
782 {
783 struct gic_v3_softc *sc;
784 struct gic_v3_irqsrc *gi;
785 u_int irq;
786
787 sc = device_get_softc(dev);
788 gi = (struct gic_v3_irqsrc *)isrc;
789 irq = gi->gi_irq;
790
791 if (irq <= GIC_LAST_PPI) {
792 /* SGIs and PPIs in corresponding Re-Distributor */
793 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
794 GICD_I_MASK(irq));
795 gic_v3_wait_for_rwp(sc, REDIST);
796 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
797 /* SPIs in distributor */
798 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
799 gic_v3_wait_for_rwp(sc, DIST);
800 } else
801 panic("%s: Unsupported IRQ %u", __func__, irq);
802 }
803
804 static void
805 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
806 {
807 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
808
809 gic_v3_disable_intr(dev, isrc);
810 gic_icc_write(EOIR1, gi->gi_irq);
811 }
812
813 static void
814 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
815 {
816
817 gic_v3_enable_intr(dev, isrc);
818 }
819
820 static void
821 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
822 {
823 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
824
825 if (gi->gi_trig == INTR_TRIGGER_EDGE)
826 return;
827
828 gic_icc_write(EOIR1, gi->gi_irq);
829 }
830
/*
 * pic_bind_intr method: program GICD_IROUTER so the SPI is delivered to
 * a single CPU.  SGIs/PPIs are inherently per-CPU and cannot be bound.
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;
	if (gi->gi_irq <= GIC_LAST_PPI)
		return (EINVAL);

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		/*
		 * No explicit binding requested: round-robin over all
		 * CPUs using the file-scope gic_irq_cpu cursor, and
		 * record the choice in the isrc.
		 */
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
863
864 #ifdef SMP
/*
 * pic_init_secondary method: bring up the GIC on an application
 * processor — local Re-Distributor and CPU interface, then re-enable
 * the per-CPU sources already configured by the boot CPU, and finally
 * let child PICs (e.g. the ITS) initialize on this CPU too.
 */
static void
gic_v3_init_secondary(device_t dev)
{
	device_t child;
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	struct intr_irqsrc *isrc;
	u_int cpu, irq;
	int err, i;

	sc = device_get_softc(dev);
	cpu = PCPU_GET(cpuid);

	/* Train init sequence for this (secondary) CPU. */
	for (init_func = gic_v3_secondary_init; *init_func != NULL;
	    init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n", cpu);
			return;
		}
	}

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Propagate secondary-CPU init to child interrupt controllers. */
	for (i = 0; i < sc->gic_nchildren; i++) {
		child = sc->gic_children[i];
		PIC_INIT_SECONDARY(child);
	}
}
908
/*
 * pic_ipi_send method: raise an SGI on every CPU in 'cpus' via
 * ICC_SGI1R_EL1.  One SGI1R write can target up to 16 CPUs sharing the
 * same Aff3.Aff2.Aff1 affinity prefix (selected by an Aff0 bitmap), so
 * target CPUs are batched per affinity group.  Assumes CPUs within a
 * group are contiguous in cpuid order — TODO confirm for all topologies.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Flush the accumulated IPI for the previous group */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Accumulate this CPU into the current group's request */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)		\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
956
957 static int
958 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
959 {
960 struct intr_irqsrc *isrc;
961 struct gic_v3_softc *sc = device_get_softc(dev);
962
963 if (sgi_first_unused > GIC_LAST_SGI)
964 return (ENOSPC);
965
966 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
967 sgi_to_ipi[sgi_first_unused++] = ipi;
968
969 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
970
971 *isrcp = isrc;
972 return (0);
973 }
974 #endif /* SMP */
975
976 /*
977 * Helper routines
978 */
/*
 * Spin until the Register Write Pending (RWP) bit clears in the
 * Distributor or this CPU's Re-Distributor, i.e. until the previous
 * control-register write has taken effect.  Panics after ~1 second.
 */
static void
gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
{
	struct resource *res;
	u_int cpuid;
	size_t us_left = 1000000;	/* ~1 s budget at 1 us per poll */

	cpuid = PCPU_GET(cpuid);

	switch (xdist) {
	case DIST:
		res = sc->gic_dist;
		break;
	case REDIST:
		res = &sc->gic_redists.pcpu[cpuid]->res;
		break;
	default:
		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
		return;
	}

	/*
	 * GICD_CTLR and GICR_CTLR share the RWP bit position, so the
	 * same offset/mask is polled for both destinations.
	 */
	while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
		DELAY(1);
		if (us_left-- == 0)
			panic("GICD Register write pending for too long");
	}
}
1006
1007 /* CPU interface. */
1008 static __inline void
1009 gic_v3_cpu_priority(uint64_t mask)
1010 {
1011
1012 /* Set prority mask */
1013 gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
1014 }
1015
1016 static int
1017 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
1018 {
1019 uint64_t sre;
1020 u_int cpuid;
1021
1022 cpuid = PCPU_GET(cpuid);
1023 /*
1024 * Set the SRE bit to enable access to GIC CPU interface
1025 * via system registers.
1026 */
1027 sre = READ_SPECIALREG(icc_sre_el1);
1028 sre |= ICC_SRE_EL1_SRE;
1029 WRITE_SPECIALREG(icc_sre_el1, sre);
1030 isb();
1031 /*
1032 * Now ensure that the bit is set.
1033 */
1034 sre = READ_SPECIALREG(icc_sre_el1);
1035 if ((sre & ICC_SRE_EL1_SRE) == 0) {
1036 /* We are done. This was disabled in EL2 */
1037 device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
1038 "via system registers\n", cpuid);
1039 return (ENXIO);
1040 } else if (bootverbose) {
1041 device_printf(sc->dev,
1042 "CPU%u enabled CPU interface via system registers\n",
1043 cpuid);
1044 }
1045
1046 return (0);
1047 }
1048
/*
 * Per-CPU GIC CPU-interface initialization: enable system-register
 * access, open the priority mask, and enable Group 1 interrupts.
 */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable group 1 (insecure) interrupts.
	 * NOTE(review): the register is IGRPEN1 but the bit constant is
	 * ICC_IGRPEN0_EL1_EN — presumably the EN bit shares the same
	 * position in both registers; confirm against the register defs.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
1067
/* Distributor */
/*
 * One-time Distributor initialization (boot CPU only): disable it,
 * program a sane default configuration for all SPIs, then re-enable
 * with affinity routing and Group 1 delivery.
 */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/*
	 * Set all SPIs to level-sensitive (ICFGR value 0; polarity is
	 * not encoded in ICFGR).
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 4, GICD_IROUTER(i), aff);

	return (0);
}
1124
1125 /* Re-Distributor */
1126 static int
1127 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1128 {
1129 u_int cpuid;
1130
1131 /* Allocate struct resource for all CPU's Re-Distributor registers */
1132 for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
1133 if (CPU_ISSET(cpuid, &all_cpus) != 0)
1134 sc->gic_redists.pcpu[cpuid] =
1135 malloc(sizeof(*sc->gic_redists.pcpu[0]),
1136 M_GIC_V3, M_WAITOK);
1137 else
1138 sc->gic_redists.pcpu[cpuid] = NULL;
1139 return (0);
1140 }
1141
/*
 * Locate the Re-Distributor frame belonging to the current CPU by
 * walking every frame in every region and comparing its GICR_TYPER
 * affinity field against this CPU's MPIDR affinity.  On success the
 * (offset-adjusted) resource is stored in the per-CPU descriptor.
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource r_res;
	bus_space_handle_t r_bsh;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/*
		 * Take a copy of the region's resource so the bus handle
		 * can be advanced frame-by-frame without modifying the
		 * original.
		 */
		r_res = *sc->gic_redists.regions[i];
		r_bsh = rman_get_bushandle(&r_res);

		/* Sanity-check the region really is a GICv3/v4 frame. */
		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Walk consecutive frames until the LAST bit is seen. */
		do {
			typer = bus_read_8(&r_res, GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				sc->gic_redists.pcpu[cpuid]->res = r_res;
				sc->gic_redists.pcpu[cpuid]->lpi_enabled = false;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/*
			 * Advance past this frame: RD + SGI pages, plus
			 * VLPI + reserved pages when the frame supports
			 * virtual LPIs (GICv4 layout).
			 */
			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				r_bsh +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}

			rman_set_bushandle(&r_res, r_bsh);
		} while ((typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1210
/*
 * Wake this CPU's Re-Distributor: clear the ProcessorSleep bit in
 * GICR_WAKER and wait (up to ~1 s) for ChildrenAsleep to clear.
 */
static int
gic_v3_redist_wake(struct gic_v3_softc *sc)
{
	uint32_t waker;
	size_t us_left = 1000000;	/* ~1 s budget at 1 us per poll */

	waker = gic_r_read(sc, 4, GICR_WAKER);
	/* Wake up Re-Distributor for this CPU */
	waker &= ~GICR_WAKER_PS;
	gic_r_write(sc, 4, GICR_WAKER, waker);
	/*
	 * When clearing ProcessorSleep bit it is required to wait for
	 * ChildrenAsleep to become zero following the processor power-on.
	 */
	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
		DELAY(1);
		if (us_left-- == 0) {
			panic("Could not wake Re-Distributor for CPU%u",
			    PCPU_GET(cpuid));
		}
	}

	if (bootverbose) {
		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
		    PCPU_GET(cpuid));
	}

	return (0);
}
1240
/*
 * Per-CPU Re-Distributor initialization: locate and wake this CPU's
 * frame, then configure its SGIs and PPIs (group, enables, priority).
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs (the mask covers the PPI bits of ICENABLER0) */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/* Set priority for SGIs and PPIs */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
Cache object: 4e08b64913a0449292660209bf94edd7
|