1 /*-
2 * Copyright (c) 2015-2016 The FreeBSD Foundation
3 * All rights reserved.
4 *
5 * This software was developed by Andrew Turner under
6 * the sponsorship of the FreeBSD Foundation.
7 *
8 * This software was developed by Semihalf under
9 * the sponsorship of the FreeBSD Foundation.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include "opt_acpi.h"
34 #include "opt_platform.h"
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: releng/12.0/sys/arm64/arm64/gic_v3.c 335052 2018-06-13 12:17:11Z andrew $");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bitstring.h>
42 #include <sys/bus.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/malloc.h>
46 #include <sys/module.h>
47 #include <sys/rman.h>
48 #include <sys/pcpu.h>
49 #include <sys/proc.h>
50 #include <sys/cpuset.h>
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/smp.h>
54
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/intr.h>
61
62 #ifdef FDT
63 #include <dev/fdt/fdt_intr.h>
64 #include <dev/ofw/ofw_bus_subr.h>
65 #endif
66
67 #ifdef DEV_ACPI
68 #include <contrib/dev/acpica/include/acpi.h>
69 #include <dev/acpica/acpivar.h>
70 #endif
71
72 #include "pic_if.h"
73
74 #include <arm/arm/gic_common.h>
75 #include "gic_v3_reg.h"
76 #include "gic_v3_var.h"
77
/* Bus interface methods, implemented below. */
static bus_get_domain_t gic_v3_get_domain;
static bus_read_ivar_t gic_v3_read_ivar;

/* INTRNG interrupt-controller (PIC) interface methods, implemented below. */
static pic_disable_intr_t gic_v3_disable_intr;
static pic_enable_intr_t gic_v3_enable_intr;
static pic_map_intr_t gic_v3_map_intr;
static pic_setup_intr_t gic_v3_setup_intr;
static pic_teardown_intr_t gic_v3_teardown_intr;
static pic_post_filter_t gic_v3_post_filter;
static pic_post_ithread_t gic_v3_post_ithread;
static pic_pre_ithread_t gic_v3_pre_ithread;
static pic_bind_intr_t gic_v3_bind_intr;
#ifdef SMP
static pic_init_secondary_t gic_v3_init_secondary;
static pic_ipi_send_t gic_v3_ipi_send;
static pic_ipi_setup_t gic_v3_ipi_setup;
#endif

/* Last CPU an SPI was round-robin routed to; see gic_v3_bind_intr(). */
static u_int gic_irq_cpu;
#ifdef SMP
/* Per-SGI record of which IPI the SGI was allocated to. */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
/* Next SGI number available for IPI allocation; see gic_v3_ipi_setup(). */
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif
101
/*
 * Method table shared by the FDT and ACPI front-ends (they subclass this
 * driver via DEFINE_CLASS_0 below and provide probe/attach themselves).
 */
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,	gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,	gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));

/*
 * Driver-specific definitions.
 */
/* Malloc type for all allocations made by this driver. */
MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
137
138 /*
139 * Helper functions and definitions.
140 */
/* Destination registers, either Distributor or Re-Distributor */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};

/* Per-interrupt source state; embeds the generic INTRNG irqsrc. */
struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;
	uint32_t		gi_irq;		/* GIC interrupt number */
	enum intr_polarity	gi_pol;		/* configured polarity */
	enum intr_trigger	gi_trig;	/* configured trigger mode */
};

/* Helper routines starting with gic_v3_ */
static int gic_v3_dist_init(struct gic_v3_softc *);
static int gic_v3_redist_alloc(struct gic_v3_softc *);
static int gic_v3_redist_find(struct gic_v3_softc *);
static int gic_v3_redist_init(struct gic_v3_softc *);
static int gic_v3_cpu_init(struct gic_v3_softc *);
static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);

/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/* Primary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};

#ifdef SMP
/* Secondary CPU initialization sequence (Distributor is already set up). */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};
#endif
181
182 uint32_t
183 gic_r_read_4(device_t dev, bus_size_t offset)
184 {
185 struct gic_v3_softc *sc;
186
187 sc = device_get_softc(dev);
188 return (bus_read_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
189 }
190
191 uint64_t
192 gic_r_read_8(device_t dev, bus_size_t offset)
193 {
194 struct gic_v3_softc *sc;
195
196 sc = device_get_softc(dev);
197 return (bus_read_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
198 }
199
200 void
201 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
202 {
203 struct gic_v3_softc *sc;
204
205 sc = device_get_softc(dev);
206 bus_write_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
207 }
208
209 void
210 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
211 {
212 struct gic_v3_softc *sc;
213
214 sc = device_get_softc(dev);
215 bus_write_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
216 }
217
218 /*
219 * Device interface.
220 */
221 int
222 gic_v3_attach(device_t dev)
223 {
224 struct gic_v3_softc *sc;
225 gic_v3_initseq_t *init_func;
226 uint32_t typer;
227 int rid;
228 int err;
229 size_t i;
230 u_int irq;
231 const char *name;
232
233 sc = device_get_softc(dev);
234 sc->gic_registered = FALSE;
235 sc->dev = dev;
236 err = 0;
237
238 /* Initialize mutex */
239 mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
240
241 /*
242 * Allocate array of struct resource.
243 * One entry for Distributor and all remaining for Re-Distributor.
244 */
245 sc->gic_res = malloc(
246 sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
247 M_GIC_V3, M_WAITOK);
248
249 /* Now allocate corresponding resources */
250 for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
251 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
252 &rid, RF_ACTIVE);
253 if (sc->gic_res[rid] == NULL)
254 return (ENXIO);
255 }
256
257 /*
258 * Distributor interface
259 */
260 sc->gic_dist = sc->gic_res[0];
261
262 /*
263 * Re-Dristributor interface
264 */
265 /* Allocate space under region descriptions */
266 sc->gic_redists.regions = malloc(
267 sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
268 M_GIC_V3, M_WAITOK);
269
270 /* Fill-up bus_space information for each region. */
271 for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
272 sc->gic_redists.regions[i] = sc->gic_res[rid];
273
274 /* Get the number of supported SPI interrupts */
275 typer = gic_d_read(sc, 4, GICD_TYPER);
276 sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
277 if (sc->gic_nirqs > GIC_I_NUM_MAX)
278 sc->gic_nirqs = GIC_I_NUM_MAX;
279
280 sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
281 M_GIC_V3, M_WAITOK | M_ZERO);
282 name = device_get_nameunit(dev);
283 for (irq = 0; irq < sc->gic_nirqs; irq++) {
284 struct intr_irqsrc *isrc;
285
286 sc->gic_irqs[irq].gi_irq = irq;
287 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
288 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
289
290 isrc = &sc->gic_irqs[irq].gi_isrc;
291 if (irq <= GIC_LAST_SGI) {
292 err = intr_isrc_register(isrc, sc->dev,
293 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
294 } else if (irq <= GIC_LAST_PPI) {
295 err = intr_isrc_register(isrc, sc->dev,
296 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
297 } else {
298 err = intr_isrc_register(isrc, sc->dev, 0,
299 "%s,s%u", name, irq - GIC_FIRST_SPI);
300 }
301 if (err != 0) {
302 /* XXX call intr_isrc_deregister() */
303 free(sc->gic_irqs, M_DEVBUF);
304 return (err);
305 }
306 }
307
308 /*
309 * Read the Peripheral ID2 register. This is an implementation
310 * defined register, but seems to be implemented in all GICv3
311 * parts and Linux expects it to be there.
312 */
313 sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
314
315 /* Get the number of supported interrupt identifier bits */
316 sc->gic_idbits = GICD_TYPER_IDBITS(typer);
317
318 if (bootverbose) {
319 device_printf(dev, "SPIs: %u, IDs: %u\n",
320 sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
321 }
322
323 /* Train init sequence for boot CPU */
324 for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
325 err = (*init_func)(sc);
326 if (err != 0)
327 return (err);
328 }
329
330 return (0);
331 }
332
/*
 * Common detach body: release bus resources and free all driver
 * allocations.  Refuses (panics) if the PIC is still registered with
 * INTRNG, since there is no deregistration path yet.
 */
int
gic_v3_detach(device_t dev)
{
	struct gic_v3_softc *sc;
	size_t i;
	int rid;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		/*
		 * XXX: We should probably deregister PIC
		 */
		if (sc->gic_registered)
			panic("Trying to detach registered PIC");
	}
	/* Release the Distributor (rid 0) and every Re-Distributor region. */
	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);

	/*
	 * Free per-CPU Re-Distributor copies; entries for absent CPUs are
	 * NULL (see gic_v3_redist_alloc()), which free() accepts.
	 */
	for (i = 0; i <= mp_maxid; i++)
		free(sc->gic_redists.pcpu[i], M_GIC_V3);

	free(sc->gic_res, M_GIC_V3);
	free(sc->gic_redists.regions, M_GIC_V3);

	return (0);
}
360
361 static int
362 gic_v3_get_domain(device_t dev, device_t child, int *domain)
363 {
364 struct gic_v3_devinfo *di;
365
366 di = device_get_ivars(child);
367 if (di->gic_domain < 0)
368 return (ENOENT);
369
370 *domain = di->gic_domain;
371 return (0);
372 }
373
/*
 * bus_read_ivar method: export GIC properties to child devices
 * (e.g. the ITS driver).  Returns ENOENT for unknown ivars.
 */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		/* Remaining IRQ numbers, split evenly among children. */
		*result = (NIRQ - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST_VADDR:
		/* KVA of the current CPU's Re-Distributor frame. */
		*result = (uintptr_t)rman_get_virtual(
		    sc->gic_redists.pcpu[PCPU_GET(cpuid)]);
		return (0);
	case GIC_IVAR_HW_REV:
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	}

	return (ENOENT);
}
408
/*
 * Top-level interrupt filter: acknowledge and dispatch interrupts until
 * the CPU interface reports none pending (IAR returns a spurious ID
 * >= gic_nirqs).  LPIs are forwarded to the child PIC (ITS); SGIs are
 * dispatched as IPIs; PPIs/SPIs go through the generic isrc dispatch.
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 *
			 * The nop padding and trailing dsb work around
			 * speculative/ordering issues in the IAR read.
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		/* LPIs are owned by the child interrupt controller (ITS). */
		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/* Spurious ID (1023 and friends): nothing left pending. */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			/* Edge interrupts are EOId before dispatch ... */
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				/* ... level ones only on the stray path. */
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}
472
473 #ifdef FDT
/*
 * Decode an FDT interrupt specifier into a GIC interrupt number plus
 * polarity and trigger mode.  Returns 0 on success, EINVAL on a
 * malformed or unsupported specifier.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;

	if (ncells < 3)
		return (EINVAL);

	/*
	 * The 1st cell is the interrupt type:
	 *	0 = SPI
	 *	1 = PPI
	 * The 2nd cell contains the interrupt number:
	 *	[0 - 987] for SPI
	 *	[0 - 15] for PPI
	 * The 3rd cell is the flags, encoded as follows:
	 *   bits[3:0] trigger type and level flags
	 *	1 = edge triggered
	 *	2 = edge triggered (PPI only)
	 *	4 = level-sensitive
	 *	8 = level-sensitive (PPI only)
	 */
	switch (cells[0]) {
	case 0:
		irq = GIC_FIRST_SPI + cells[1];
		/* SPI irq is checked later. */
		break;
	case 1:
		irq = GIC_FIRST_PPI + cells[1];
		if (irq > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
		break;
	default:
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	/* Map FDT trigger/level flags onto INTRNG trigger and polarity. */
	switch (cells[2] & FDT_INTR_MASK) {
	case FDT_INTR_EDGE_RISING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_EDGE_FALLING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_LOW;
		break;
	case FDT_INTR_LEVEL_HIGH:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_LEVEL_LOW:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_LOW;
		break;
	default:
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", cells[2]);
		return (EINVAL);
	}

	/* Check the interrupt is valid: SPIs must be active-high. */
	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
		return (EINVAL);

	*irqp = irq;
	return (0);
}
547
548 static int
549 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
550 enum intr_polarity *polp, enum intr_trigger *trigp)
551 {
552 struct gic_v3_irqsrc *gi;
553
554 /* SPI-mapped MSI */
555 gi = (struct gic_v3_irqsrc *)msi_data->isrc;
556 if (gi == NULL)
557 return (ENXIO);
558
559 *irqp = gi->gi_irq;
560
561 /* MSI/MSI-X interrupts are always edge triggered with high polarity */
562 *polp = INTR_POLARITY_HIGH;
563 *trigp = INTR_TRIGGER_EDGE;
564
565 return (0);
566 }
567
/*
 * Common map-data decoder used by gic_v3_map_intr() and
 * gic_v3_setup_intr(): translate FDT/ACPI/MSI map data into an IRQ
 * number, polarity and trigger, validating all three.  polp/trigp may
 * be NULL when the caller only needs the IRQ number.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		/* ACPI map data carries the decoded values directly. */
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/* Reject IRQ numbers outside the Distributor's range. */
	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
639
640 static int
641 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
642 struct intr_irqsrc **isrcp)
643 {
644 struct gic_v3_softc *sc;
645 int error;
646 u_int irq;
647
648 error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
649 if (error == 0) {
650 sc = device_get_softc(dev);
651 *isrcp = GIC_INTR_ISRC(sc, irq);
652 }
653 return (error);
654 }
655
/*
 * pic_setup_intr method: validate the requested configuration against
 * the source and, on first setup, program trigger mode into the
 * Re-Distributor (SGI/PPI) or Distributor (SPI) ICFGR register and
 * bind the interrupt to a CPU.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;
	uint32_t reg;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* The map data must match this isrc and be fully specified. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	gi->gi_pol = pol;
	gi->gi_trig = trig;

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		mtx_lock_spin(&sc->gic_mtx);

		/* Set the trigger and polarity */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/* Two config bits per interrupt; bit 1 selects edge mode. */
		if (trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}

		mtx_unlock_spin(&sc->gic_mtx);

		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
730
731 static int
732 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
733 struct resource *res, struct intr_map_data *data)
734 {
735 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
736
737 if (isrc->isrc_handlers == 0) {
738 gi->gi_pol = INTR_POLARITY_CONFORM;
739 gi->gi_trig = INTR_TRIGGER_CONFORM;
740 }
741
742 return (0);
743 }
744
/*
 * pic_disable_intr method: mask the interrupt by writing its bit to
 * the appropriate ICENABLER register (Re-Distributor SGI frame for
 * SGIs/PPIs, Distributor for SPIs) and wait for the write to settle.
 */
static void
gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
768
/*
 * pic_enable_intr method: unmask the interrupt by writing its bit to
 * the appropriate ISENABLER register (Re-Distributor SGI frame for
 * SGIs/PPIs, Distributor for SPIs) and wait for the write to settle.
 */
static void
gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
792
/*
 * pic_pre_ithread method: mask the source and signal EOI before
 * handing the interrupt off to its ithread, so it cannot re-fire
 * until the ithread completes (see gic_v3_post_ithread()).
 */
static void
gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

	gic_v3_disable_intr(dev, isrc);
	gic_icc_write(EOIR1, gi->gi_irq);
}
801
/*
 * pic_post_ithread method: re-enable the source once its ithread has
 * finished (counterpart to gic_v3_pre_ithread()).
 */
static void
gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	gic_v3_enable_intr(dev, isrc);
}
808
809 static void
810 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
811 {
812 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
813
814 if (gi->gi_trig == INTR_TRIGGER_EDGE)
815 return;
816
817 gic_icc_write(EOIR1, gi->gi_irq);
818 }
819
/*
 * pic_bind_intr method: route an SPI to a CPU via GICD_IROUTER.
 * With no explicit CPU set, round-robin over all CPUs; otherwise bind
 * to the first CPU in the set (the GIC routes to a single target).
 * SGIs/PPIs are per-CPU and cannot be bound.
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;
	if (gi->gi_irq <= GIC_LAST_PPI)
		return (EINVAL);

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
852
853 #ifdef SMP
854 static void
855 gic_v3_init_secondary(device_t dev)
856 {
857 device_t child;
858 struct gic_v3_softc *sc;
859 gic_v3_initseq_t *init_func;
860 struct intr_irqsrc *isrc;
861 u_int cpu, irq;
862 int err, i;
863
864 sc = device_get_softc(dev);
865 cpu = PCPU_GET(cpuid);
866
867 /* Train init sequence for boot CPU */
868 for (init_func = gic_v3_secondary_init; *init_func != NULL;
869 init_func++) {
870 err = (*init_func)(sc);
871 if (err != 0) {
872 device_printf(dev,
873 "Could not initialize GIC for CPU%u\n", cpu);
874 return;
875 }
876 }
877
878 /* Unmask attached SGI interrupts. */
879 for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
880 isrc = GIC_INTR_ISRC(sc, irq);
881 if (intr_isrc_init_on_cpu(isrc, cpu))
882 gic_v3_enable_intr(dev, isrc);
883 }
884
885 /* Unmask attached PPI interrupts. */
886 for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
887 isrc = GIC_INTR_ISRC(sc, irq);
888 if (intr_isrc_init_on_cpu(isrc, cpu))
889 gic_v3_enable_intr(dev, isrc);
890 }
891
892 for (i = 0; i < sc->gic_nchildren; i++) {
893 child = sc->gic_children[i];
894 PIC_INIT_SECONDARY(child);
895 }
896 }
897
/*
 * pic_ipi_send method: deliver an SGI to every CPU in 'cpus' via
 * ICC_SGI1R_EL1.  A single SGI1R write addresses up to 16 CPUs that
 * share Aff3.Aff2.Aff1, so CPUs are batched per affinity group
 * (assumes CPU IDs are ordered so that a group occupies a contiguous
 * run — TODO confirm for sparse topologies).
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	/* NOTE(review): 'i' is signed but compared against u_int mp_maxid. */
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Send the IPI */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
945
946 static int
947 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
948 {
949 struct intr_irqsrc *isrc;
950 struct gic_v3_softc *sc = device_get_softc(dev);
951
952 if (sgi_first_unused > GIC_LAST_SGI)
953 return (ENOSPC);
954
955 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
956 sgi_to_ipi[sgi_first_unused++] = ipi;
957
958 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
959
960 *isrcp = isrc;
961 return (0);
962 }
963 #endif /* SMP */
964
965 /*
966 * Helper routines
967 */
/*
 * Spin (up to ~1 second) until the selected frame's register write
 * pending bit clears, so a preceding configuration write has taken
 * effect.  NOTE(review): the Re-Distributor is polled with the
 * GICD_CTLR offset/bit; GICR_CTLR shares offset 0, but confirm the
 * polled bit matches the intended GICR write-pending semantics.
 */
static void
gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
{
	struct resource *res;
	u_int cpuid;
	size_t us_left = 1000000;

	cpuid = PCPU_GET(cpuid);

	switch (xdist) {
	case DIST:
		res = sc->gic_dist;
		break;
	case REDIST:
		res = sc->gic_redists.pcpu[cpuid];
		break;
	default:
		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
		return;
	}

	while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
		DELAY(1);
		if (us_left-- == 0)
			panic("GICD Register write pending for too long");
	}
}
995
996 /* CPU interface. */
/* CPU interface. */
/*
 * Program the CPU interface priority mask: interrupts with priority
 * lower than (numerically above) 'mask' are not delivered to this CPU.
 */
static __inline void
gic_v3_cpu_priority(uint64_t mask)
{

	/* Set priority mask */
	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
}
1004
/*
 * Enable system-register access to the GIC CPU interface on the
 * current CPU by setting ICC_SRE_EL1.SRE, then read it back: if the
 * bit did not stick, access was disabled at a higher exception level
 * and the driver cannot work — return ENXIO.
 */
static int
gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
{
	uint64_t sre;
	u_int cpuid;

	cpuid = PCPU_GET(cpuid);
	/*
	 * Set the SRE bit to enable access to GIC CPU interface
	 * via system registers.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	sre |= ICC_SRE_EL1_SRE;
	WRITE_SPECIALREG(icc_sre_el1, sre);
	isb();
	/*
	 * Now ensure that the bit is set.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	if ((sre & ICC_SRE_EL1_SRE) == 0) {
		/* We are done. This was disabled in EL2 */
		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
		    "via system registers\n", cpuid);
		return (ENXIO);
	} else if (bootverbose) {
		device_printf(sc->dev,
		    "CPU%u enabled CPU interface via system registers\n",
		    cpuid);
	}

	return (0);
}
1037
/*
 * Per-CPU interface initialization: enable system-register access,
 * open the priority mask, select drop+deactivate EOI mode and enable
 * Group 1 interrupt signalling.
 */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable group 1 (insecure) interrupts.  The IGRPEN0 constant is
	 * reused here; presumably its Enable bit position matches IGRPEN1's
	 * (both bit 0) — confirm against gic_v3_reg.h.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
1056
1057 /* Distributor */
/* Distributor */
/*
 * One-time Distributor initialization on the boot CPU: disable it,
 * put every SPI into a known state (Group 1 NS, level-triggered,
 * highest priority, masked), re-enable with affinity routing, and
 * route all SPIs to the boot CPU.
 */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/* Set all global interrupts to be level triggered, active low. */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 4, GICD_IROUTER(i), aff);

	return (0);
}
1113
1114 /* Re-Distributor */
1115 static int
1116 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1117 {
1118 u_int cpuid;
1119
1120 /* Allocate struct resource for all CPU's Re-Distributor registers */
1121 for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
1122 if (CPU_ISSET(cpuid, &all_cpus) != 0)
1123 sc->gic_redists.pcpu[cpuid] =
1124 malloc(sizeof(*sc->gic_redists.pcpu[0]),
1125 M_GIC_V3, M_WAITOK);
1126 else
1127 sc->gic_redists.pcpu[cpuid] = NULL;
1128 return (0);
1129 }
1130
/*
 * Locate the current CPU's Re-Distributor frame by walking every
 * Re-Distributor region and comparing each frame's GICR_TYPER affinity
 * field against this CPU's MPIDR affinity.  On a match, the frame's
 * resource description is copied into sc->gic_redists.pcpu[cpuid].
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource r_res;
	bus_space_handle_t r_bsh;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = *sc->gic_redists.regions[i];
		r_bsh = rman_get_bushandle(&r_res);

		/* Sanity-check that this really is a GICv3/v4 frame. */
		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Walk the frames within this region until the Last bit. */
		do {
			typer = bus_read_8(&r_res, GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				*sc->gic_redists.pcpu[cpuid] = r_res;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/* Advance past this frame (extra space with VLPIs). */
			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				r_bsh +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}

			rman_set_bushandle(&r_res, r_bsh);
		} while ((typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1198
/*
 * Wake the current CPU's Re-Distributor: clear ProcessorSleep in
 * GICR_WAKER and spin (up to ~1 second) until ChildrenAsleep reads
 * zero, as the architecture requires after power-on.
 */
static int
gic_v3_redist_wake(struct gic_v3_softc *sc)
{
	uint32_t waker;
	size_t us_left = 1000000;

	waker = gic_r_read(sc, 4, GICR_WAKER);
	/* Wake up Re-Distributor for this CPU */
	waker &= ~GICR_WAKER_PS;
	gic_r_write(sc, 4, GICR_WAKER, waker);
	/*
	 * When clearing ProcessorSleep bit it is required to wait for
	 * ChildrenAsleep to become zero following the processor power-on.
	 */
	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
		DELAY(1);
		if (us_left-- == 0) {
			panic("Could not wake Re-Distributor for CPU%u",
			    PCPU_GET(cpuid));
		}
	}

	if (bootverbose) {
		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
		    PCPU_GET(cpuid));
	}

	return (0);
}
1228
/*
 * Per-CPU Re-Distributor initialization: locate and wake this CPU's
 * frame, then put SGIs/PPIs into a known state (Group 1 NS, PPIs
 * masked, SGIs enabled, highest priority).
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs (the mask covers the PPI bits of ICENABLER0). */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/* Set priority for SGIs and PPIs */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
Cache object: f934e45c2c4aed6822951005cef37eb6
|