1 /*-
2 * Copyright (c) 2015-2016 The FreeBSD Foundation
3 * All rights reserved.
4 *
5 * This software was developed by Andrew Turner under
6 * the sponsorship of the FreeBSD Foundation.
7 *
8 * This software was developed by Semihalf under
9 * the sponsorship of the FreeBSD Foundation.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include "opt_platform.h"
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD: releng/11.2/sys/arm64/arm64/gic_v3.c 305533 2016-09-07 13:10:02Z andrew $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/rman.h>
47 #include <sys/pcpu.h>
48 #include <sys/proc.h>
49 #include <sys/cpuset.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/smp.h>
53
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56
57 #include <machine/bus.h>
58 #include <machine/cpu.h>
59 #include <machine/intr.h>
60
61 #ifdef FDT
62 #include <dev/ofw/ofw_bus_subr.h>
63 #endif
64
65 #include "pic_if.h"
66
67 #include "gic_v3_reg.h"
68 #include "gic_v3_var.h"
69
70 static bus_read_ivar_t gic_v3_read_ivar;
71
72 static pic_disable_intr_t gic_v3_disable_intr;
73 static pic_enable_intr_t gic_v3_enable_intr;
74 static pic_map_intr_t gic_v3_map_intr;
75 static pic_setup_intr_t gic_v3_setup_intr;
76 static pic_teardown_intr_t gic_v3_teardown_intr;
77 static pic_post_filter_t gic_v3_post_filter;
78 static pic_post_ithread_t gic_v3_post_ithread;
79 static pic_pre_ithread_t gic_v3_pre_ithread;
80 static pic_bind_intr_t gic_v3_bind_intr;
81 #ifdef SMP
82 static pic_init_secondary_t gic_v3_init_secondary;
83 static pic_ipi_send_t gic_v3_ipi_send;
84 static pic_ipi_setup_t gic_v3_ipi_setup;
85 #endif
86
87 static u_int gic_irq_cpu;
88 #ifdef SMP
89 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
90 static u_int sgi_first_unused = GIC_FIRST_SGI;
91 #endif
92
/*
 * Method table shared by the bus-specific (FDT/ACPI) front-ends that
 * subclass this driver; device_attach is supplied by the front-end.
 */
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach, gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar, gic_v3_read_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr, gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr, gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr, gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr, gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr, gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter, gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread, gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread, gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr, gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary, gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send, gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup, gic_v3_ipi_setup),
#endif

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));
122
123 /*
124 * Driver-specific definitions.
125 */
126 MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
127
128 /*
129 * Helper functions and definitions.
130 */
/* Destination registers, either Distributor or Re-Distributor */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};

/* Per-interrupt software state, wrapping the generic INTRNG source. */
struct gic_v3_irqsrc {
	struct intr_irqsrc gi_isrc;	/* generic interrupt source (must be first) */
	uint32_t gi_irq;		/* GIC interrupt number (SGI/PPI/SPI) */
	enum intr_polarity gi_pol;	/* configured polarity, CONFORM until setup */
	enum intr_trigger gi_trig;	/* configured trigger mode, CONFORM until setup */
};
143
144 /* Helper routines starting with gic_v3_ */
145 static int gic_v3_dist_init(struct gic_v3_softc *);
146 static int gic_v3_redist_alloc(struct gic_v3_softc *);
147 static int gic_v3_redist_find(struct gic_v3_softc *);
148 static int gic_v3_redist_init(struct gic_v3_softc *);
149 static int gic_v3_cpu_init(struct gic_v3_softc *);
150 static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
151
152 /* A sequence of init functions for primary (boot) CPU */
153 typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
154 /* Primary CPU initialization sequence */
155 static gic_v3_initseq_t gic_v3_primary_init[] = {
156 gic_v3_dist_init,
157 gic_v3_redist_alloc,
158 gic_v3_redist_init,
159 gic_v3_cpu_init,
160 NULL
161 };
162
163 #ifdef SMP
164 /* Secondary CPU initialization sequence */
165 static gic_v3_initseq_t gic_v3_secondary_init[] = {
166 gic_v3_redist_init,
167 gic_v3_cpu_init,
168 NULL
169 };
170 #endif
171
172 uint32_t
173 gic_r_read_4(device_t dev, bus_size_t offset)
174 {
175 struct gic_v3_softc *sc;
176
177 sc = device_get_softc(dev);
178 return (bus_read_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
179 }
180
181 uint64_t
182 gic_r_read_8(device_t dev, bus_size_t offset)
183 {
184 struct gic_v3_softc *sc;
185
186 sc = device_get_softc(dev);
187 return (bus_read_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
188 }
189
190 void
191 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
192 {
193 struct gic_v3_softc *sc;
194
195 sc = device_get_softc(dev);
196 bus_write_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
197 }
198
199 void
200 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
201 {
202 struct gic_v3_softc *sc;
203
204 sc = device_get_softc(dev);
205 bus_write_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
206 }
207
208 /*
209 * Device interface.
210 */
211 int
212 gic_v3_attach(device_t dev)
213 {
214 struct gic_v3_softc *sc;
215 gic_v3_initseq_t *init_func;
216 uint32_t typer;
217 int rid;
218 int err;
219 size_t i;
220 u_int irq;
221 const char *name;
222
223 sc = device_get_softc(dev);
224 sc->gic_registered = FALSE;
225 sc->dev = dev;
226 err = 0;
227
228 /* Initialize mutex */
229 mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
230
231 /*
232 * Allocate array of struct resource.
233 * One entry for Distributor and all remaining for Re-Distributor.
234 */
235 sc->gic_res = malloc(
236 sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
237 M_GIC_V3, M_WAITOK);
238
239 /* Now allocate corresponding resources */
240 for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
241 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
242 &rid, RF_ACTIVE);
243 if (sc->gic_res[rid] == NULL)
244 return (ENXIO);
245 }
246
247 /*
248 * Distributor interface
249 */
250 sc->gic_dist = sc->gic_res[0];
251
252 /*
253 * Re-Dristributor interface
254 */
255 /* Allocate space under region descriptions */
256 sc->gic_redists.regions = malloc(
257 sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
258 M_GIC_V3, M_WAITOK);
259
260 /* Fill-up bus_space information for each region. */
261 for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
262 sc->gic_redists.regions[i] = sc->gic_res[rid];
263
264 /* Get the number of supported SPI interrupts */
265 typer = gic_d_read(sc, 4, GICD_TYPER);
266 sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
267 if (sc->gic_nirqs > GIC_I_NUM_MAX)
268 sc->gic_nirqs = GIC_I_NUM_MAX;
269
270 sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
271 M_GIC_V3, M_WAITOK | M_ZERO);
272 name = device_get_nameunit(dev);
273 for (irq = 0; irq < sc->gic_nirqs; irq++) {
274 struct intr_irqsrc *isrc;
275
276 sc->gic_irqs[irq].gi_irq = irq;
277 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
278 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
279
280 isrc = &sc->gic_irqs[irq].gi_isrc;
281 if (irq <= GIC_LAST_SGI) {
282 err = intr_isrc_register(isrc, sc->dev,
283 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
284 } else if (irq <= GIC_LAST_PPI) {
285 err = intr_isrc_register(isrc, sc->dev,
286 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
287 } else {
288 err = intr_isrc_register(isrc, sc->dev, 0,
289 "%s,s%u", name, irq - GIC_FIRST_SPI);
290 }
291 if (err != 0) {
292 /* XXX call intr_isrc_deregister() */
293 free(sc->gic_irqs, M_DEVBUF);
294 return (err);
295 }
296 }
297
298 /* Get the number of supported interrupt identifier bits */
299 sc->gic_idbits = GICD_TYPER_IDBITS(typer);
300
301 if (bootverbose) {
302 device_printf(dev, "SPIs: %u, IDs: %u\n",
303 sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
304 }
305
306 /* Train init sequence for boot CPU */
307 for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
308 err = (*init_func)(sc);
309 if (err != 0)
310 return (err);
311 }
312
313 return (0);
314 }
315
/*
 * Release bus resources and per-CPU Re-Distributor bookkeeping.  Only
 * legal while the PIC has not been registered with the interrupt
 * framework (gic_registered), as there is no way to deregister it.
 */
int
gic_v3_detach(device_t dev)
{
	struct gic_v3_softc *sc;
	size_t i;
	int rid;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		/*
		 * XXX: We should probably deregister PIC
		 */
		if (sc->gic_registered)
			panic("Trying to detach registered PIC");
	}
	/* Release the Distributor (rid 0) and all Re-Distributor regions. */
	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);

	/* Absent CPUs have NULL entries; free(NULL) is a no-op. */
	for (i = 0; i < mp_ncpus; i++)
		free(sc->gic_redists.pcpu[i], M_GIC_V3);

	free(sc->gic_res, M_GIC_V3);
	free(sc->gic_redists.regions, M_GIC_V3);

	return (0);
}
343
344 static int
345 gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
346 {
347 struct gic_v3_softc *sc;
348
349 sc = device_get_softc(dev);
350
351 switch (which) {
352 case GICV3_IVAR_NIRQS:
353 *result = sc->gic_nirqs;
354 return (0);
355 case GICV3_IVAR_REDIST_VADDR:
356 *result = (uintptr_t)rman_get_virtual(
357 sc->gic_redists.pcpu[PCPU_GET(cpuid)]);
358 return (0);
359 }
360
361 return (ENOENT);
362 }
363
364 int
365 arm_gic_v3_intr(void *arg)
366 {
367 struct gic_v3_softc *sc = arg;
368 struct gic_v3_irqsrc *gi;
369 struct intr_pic *pic;
370 uint64_t active_irq;
371 struct trapframe *tf;
372 bool first;
373
374 first = true;
375 pic = sc->gic_pic;
376
377 while (1) {
378 if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
379 /*
380 * Hardware: Cavium ThunderX
381 * Chip revision: Pass 1.0 (early version)
382 * Pass 1.1 (production)
383 * ERRATUM: 22978, 23154
384 */
385 __asm __volatile(
386 "nop;nop;nop;nop;nop;nop;nop;nop; \n"
387 "mrs %0, ICC_IAR1_EL1 \n"
388 "nop;nop;nop;nop; \n"
389 "dsb sy \n"
390 : "=&r" (active_irq));
391 } else {
392 active_irq = gic_icc_read(IAR1);
393 }
394
395 if (active_irq >= GIC_FIRST_LPI) {
396 intr_child_irq_handler(pic, active_irq);
397 continue;
398 }
399
400 if (__predict_false(active_irq >= sc->gic_nirqs))
401 return (FILTER_HANDLED);
402
403 tf = curthread->td_intr_frame;
404 gi = &sc->gic_irqs[active_irq];
405 if (active_irq <= GIC_LAST_SGI) {
406 /* Call EOI for all IPI before dispatch. */
407 gic_icc_write(EOIR1, (uint64_t)active_irq);
408 #ifdef SMP
409 intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
410 #else
411 device_printf(sc->dev, "SGI %ju on UP system detected\n",
412 (uintmax_t)(active_irq - GIC_FIRST_SGI));
413 #endif
414 } else if (active_irq >= GIC_FIRST_PPI &&
415 active_irq <= GIC_LAST_SPI) {
416 if (gi->gi_pol == INTR_TRIGGER_EDGE)
417 gic_icc_write(EOIR1, gi->gi_irq);
418
419 if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
420 if (gi->gi_pol != INTR_TRIGGER_EDGE)
421 gic_icc_write(EOIR1, gi->gi_irq);
422 gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
423 device_printf(sc->dev,
424 "Stray irq %lu disabled\n", active_irq);
425 }
426 }
427 }
428 }
429
430 #ifdef FDT
431 static int
432 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
433 enum intr_polarity *polp, enum intr_trigger *trigp)
434 {
435 u_int irq;
436
437 if (ncells < 3)
438 return (EINVAL);
439
440 /*
441 * The 1st cell is the interrupt type:
442 * 0 = SPI
443 * 1 = PPI
444 * The 2nd cell contains the interrupt number:
445 * [0 - 987] for SPI
446 * [0 - 15] for PPI
447 * The 3rd cell is the flags, encoded as follows:
448 * bits[3:0] trigger type and level flags
449 * 1 = edge triggered
450 * 2 = edge triggered (PPI only)
451 * 4 = level-sensitive
452 * 8 = level-sensitive (PPI only)
453 */
454 switch (cells[0]) {
455 case 0:
456 irq = GIC_FIRST_SPI + cells[1];
457 /* SPI irq is checked later. */
458 break;
459 case 1:
460 irq = GIC_FIRST_PPI + cells[1];
461 if (irq > GIC_LAST_PPI) {
462 device_printf(dev, "unsupported PPI interrupt "
463 "number %u\n", cells[1]);
464 return (EINVAL);
465 }
466 break;
467 default:
468 device_printf(dev, "unsupported interrupt type "
469 "configuration %u\n", cells[0]);
470 return (EINVAL);
471 }
472
473 switch (cells[2] & 0xf) {
474 case 1:
475 *trigp = INTR_TRIGGER_EDGE;
476 *polp = INTR_POLARITY_HIGH;
477 break;
478 case 2:
479 *trigp = INTR_TRIGGER_EDGE;
480 *polp = INTR_POLARITY_LOW;
481 break;
482 case 4:
483 *trigp = INTR_TRIGGER_LEVEL;
484 *polp = INTR_POLARITY_HIGH;
485 break;
486 case 8:
487 *trigp = INTR_TRIGGER_LEVEL;
488 *polp = INTR_POLARITY_LOW;
489 break;
490 default:
491 device_printf(dev, "unsupported trigger/polarity "
492 "configuration 0x%02x\n", cells[2]);
493 return (EINVAL);
494 }
495
496 /* Check the interrupt is valid */
497 if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
498 return (EINVAL);
499
500 *irqp = irq;
501 return (0);
502 }
503 #endif
504
/*
 * Decode bus-specific interrupt mapping data into an (irq, polarity,
 * trigger) triple and range-check each field.  Returns EINVAL for
 * unknown map-data types or out-of-range values.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
	default:
		/* Unknown map data; irq/pol/trig were never filled in. */
		return (EINVAL);
	}

	/* The interrupt must be one we registered at attach time. */
	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	/* Polarity and trigger are optional outputs. */
	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
558
559 static int
560 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
561 struct intr_irqsrc **isrcp)
562 {
563 struct gic_v3_softc *sc;
564 int error;
565 u_int irq;
566
567 error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
568 if (error == 0) {
569 sc = device_get_softc(dev);
570 *isrcp = GIC_INTR_ISRC(sc, irq);
571 }
572 return (error);
573 }
574
/*
 * PIC_SETUP_INTR: record and program polarity/trigger configuration the
 * first time a handler is attached; subsequent calls only verify that
 * the requested configuration matches the recorded one.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;
	uint32_t reg;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* The map data must match this isrc and be fully specified. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	gi->gi_pol = pol;
	gi->gi_trig = trig;

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		mtx_lock_spin(&sc->gic_mtx);

		/*
		 * Set the trigger and polarity.  PPIs are configured via the
		 * Re-Distributor's SGI frame, SPIs via the Distributor.
		 */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/* Each interrupt takes 2 bits in ICFGR; bit 1 selects edge. */
		if (trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}

		mtx_unlock_spin(&sc->gic_mtx);

		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
649
650 static int
651 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
652 struct resource *res, struct intr_map_data *data)
653 {
654 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
655
656 if (isrc->isrc_handlers == 0) {
657 gi->gi_pol = INTR_POLARITY_CONFORM;
658 gi->gi_trig = INTR_TRIGGER_CONFORM;
659 }
660
661 return (0);
662 }
663
664 static void
665 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
666 {
667 struct gic_v3_softc *sc;
668 struct gic_v3_irqsrc *gi;
669 u_int irq;
670
671 sc = device_get_softc(dev);
672 gi = (struct gic_v3_irqsrc *)isrc;
673 irq = gi->gi_irq;
674
675 if (irq <= GIC_LAST_PPI) {
676 /* SGIs and PPIs in corresponding Re-Distributor */
677 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
678 GICD_I_MASK(irq));
679 gic_v3_wait_for_rwp(sc, REDIST);
680 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
681 /* SPIs in distributor */
682 gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
683 gic_v3_wait_for_rwp(sc, DIST);
684 } else
685 panic("%s: Unsupported IRQ %u", __func__, irq);
686 }
687
688 static void
689 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
690 {
691 struct gic_v3_softc *sc;
692 struct gic_v3_irqsrc *gi;
693 u_int irq;
694
695 sc = device_get_softc(dev);
696 gi = (struct gic_v3_irqsrc *)isrc;
697 irq = gi->gi_irq;
698
699 if (irq <= GIC_LAST_PPI) {
700 /* SGIs and PPIs in corresponding Re-Distributor */
701 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
702 GICD_I_MASK(irq));
703 gic_v3_wait_for_rwp(sc, REDIST);
704 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
705 /* SPIs in distributor */
706 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
707 gic_v3_wait_for_rwp(sc, DIST);
708 } else
709 panic("%s: Unsupported IRQ %u", __func__, irq);
710 }
711
712 static void
713 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
714 {
715 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
716
717 gic_v3_disable_intr(dev, isrc);
718 gic_icc_write(EOIR1, gi->gi_irq);
719 }
720
721 static void
722 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
723 {
724
725 gic_v3_enable_intr(dev, isrc);
726 }
727
728 static void
729 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
730 {
731 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
732
733 if (gi->gi_pol == INTR_TRIGGER_EDGE)
734 return;
735
736 gic_icc_write(EOIR1, gi->gi_irq);
737 }
738
/*
 * Route an SPI to a CPU.  With no CPU set requested, pick the next CPU
 * round-robin; otherwise route to the first CPU of the requested set
 * (GICD_IROUTER targets a single CPU in affinity-routing mode).
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;
	/* SGIs and PPIs are inherently per-CPU and cannot be re-routed. */
	if (gi->gi_irq <= GIC_LAST_PPI)
		return (EINVAL);

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		/* No explicit binding: distribute SPIs round-robin. */
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
771
772 #ifdef SMP
773 static void
774 gic_v3_init_secondary(device_t dev)
775 {
776 device_t child;
777 struct gic_v3_softc *sc;
778 gic_v3_initseq_t *init_func;
779 struct intr_irqsrc *isrc;
780 u_int cpu, irq;
781 int err, i;
782
783 sc = device_get_softc(dev);
784 cpu = PCPU_GET(cpuid);
785
786 /* Train init sequence for boot CPU */
787 for (init_func = gic_v3_secondary_init; *init_func != NULL;
788 init_func++) {
789 err = (*init_func)(sc);
790 if (err != 0) {
791 device_printf(dev,
792 "Could not initialize GIC for CPU%u\n", cpu);
793 return;
794 }
795 }
796
797 /* Unmask attached SGI interrupts. */
798 for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
799 isrc = GIC_INTR_ISRC(sc, irq);
800 if (intr_isrc_init_on_cpu(isrc, cpu))
801 gic_v3_enable_intr(dev, isrc);
802 }
803
804 /* Unmask attached PPI interrupts. */
805 for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
806 isrc = GIC_INTR_ISRC(sc, irq);
807 if (intr_isrc_init_on_cpu(isrc, cpu))
808 gic_v3_enable_intr(dev, isrc);
809 }
810
811 for (i = 0; i < sc->gic_nchildren; i++) {
812 child = sc->gic_children[i];
813 PIC_INIT_SECONDARY(child);
814 }
815 }
816
/*
 * Send the SGI backing 'isrc' to every CPU in 'cpus'.  ICC_SGI1R_EL1
 * addresses targets by affinity group (Aff3.Aff2.Aff1) plus a 16-bit
 * Aff0 target list, so writes are batched per affinity group.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define GIC_AFF_MASK (CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define GIC_AFFINITY(i) (CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i < mp_ncpus; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Flush the accumulated IPI for the previous group. */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define ICC_SGI1R_AFFINITY(aff) \
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) | \
    ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) | \
    ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/*
			 * Set the target-list bit for this CPU.
			 * NOTE(review): assumes Aff0 < 16 so the shift stays
			 * inside the SGI target list - confirm for systems
			 * with larger Aff0 values.
			 */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
864
865 static int
866 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
867 {
868 struct intr_irqsrc *isrc;
869 struct gic_v3_softc *sc = device_get_softc(dev);
870
871 if (sgi_first_unused > GIC_LAST_SGI)
872 return (ENOSPC);
873
874 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
875 sgi_to_ipi[sgi_first_unused++] = ipi;
876
877 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
878
879 *isrcp = isrc;
880 return (0);
881 }
882 #endif /* SMP */
883
884 /*
885 * Helper routines
886 */
887 static void
888 gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
889 {
890 struct resource *res;
891 u_int cpuid;
892 size_t us_left = 1000000;
893
894 cpuid = PCPU_GET(cpuid);
895
896 switch (xdist) {
897 case DIST:
898 res = sc->gic_dist;
899 break;
900 case REDIST:
901 res = sc->gic_redists.pcpu[cpuid];
902 break;
903 default:
904 KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
905 return;
906 }
907
908 while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
909 DELAY(1);
910 if (us_left-- == 0)
911 panic("GICD Register write pending for too long");
912 }
913 }
914
915 /* CPU interface. */
916 static __inline void
917 gic_v3_cpu_priority(uint64_t mask)
918 {
919
920 /* Set prority mask */
921 gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
922 }
923
924 static int
925 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
926 {
927 uint64_t sre;
928 u_int cpuid;
929
930 cpuid = PCPU_GET(cpuid);
931 /*
932 * Set the SRE bit to enable access to GIC CPU interface
933 * via system registers.
934 */
935 sre = READ_SPECIALREG(icc_sre_el1);
936 sre |= ICC_SRE_EL1_SRE;
937 WRITE_SPECIALREG(icc_sre_el1, sre);
938 isb();
939 /*
940 * Now ensure that the bit is set.
941 */
942 sre = READ_SPECIALREG(icc_sre_el1);
943 if ((sre & ICC_SRE_EL1_SRE) == 0) {
944 /* We are done. This was disabled in EL2 */
945 device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
946 "via system registers\n", cpuid);
947 return (ENXIO);
948 } else if (bootverbose) {
949 device_printf(sc->dev,
950 "CPU%u enabled CPU interface via system registers\n",
951 cpuid);
952 }
953
954 return (0);
955 }
956
/* Per-CPU configuration of the GIC CPU interface (runs on every CPU). */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable Group 1 (Non-secure) interrupts.
	 * NOTE(review): the IGRPEN0 enable constant is written to IGRPEN1;
	 * presumably both registers define EN as the same bit - confirm
	 * against gic_v3_reg.h.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
975
976 /* Distributor */
/* One-time Distributor initialization, run from the boot CPU. */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be level-sensitive (ICFGR value 0). */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 4, GICD_IROUTER(i), aff);

	return (0);
}
1028
1029 /* Re-Distributor */
1030 static int
1031 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1032 {
1033 u_int cpuid;
1034
1035 /* Allocate struct resource for all CPU's Re-Distributor registers */
1036 for (cpuid = 0; cpuid < mp_ncpus; cpuid++)
1037 if (CPU_ISSET(cpuid, &all_cpus) != 0)
1038 sc->gic_redists.pcpu[cpuid] =
1039 malloc(sizeof(*sc->gic_redists.pcpu[0]),
1040 M_GIC_V3, M_WAITOK);
1041 else
1042 sc->gic_redists.pcpu[cpuid] = NULL;
1043 return (0);
1044 }
1045
/*
 * Walk all Re-Distributor regions looking for the per-CPU frame whose
 * GICR_TYPER affinity matches the calling CPU, and store a resource
 * describing that frame in sc->gic_redists.pcpu[cpuid].
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource r_res;
	bus_space_handle_t r_bsh;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = *sc->gic_redists.regions[i];
		r_bsh = rman_get_bushandle(&r_res);

		/* The region must host a GICv3 or GICv4 Re-Distributor. */
		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
		switch (pidr2 & GICR_PIDR2_ARCH_MASK) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Scan the consecutive per-CPU frames within this region. */
		do {
			typer = bus_read_8(&r_res, GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				*sc->gic_redists.pcpu[cpuid] = r_res;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/*
			 * Advance past this frame; frames with VLPI support
			 * carry two extra pages.
			 */
			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				r_bsh +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}

			rman_set_bushandle(&r_res, r_bsh);
		} while ((typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1113
1114 static int
1115 gic_v3_redist_wake(struct gic_v3_softc *sc)
1116 {
1117 uint32_t waker;
1118 size_t us_left = 1000000;
1119
1120 waker = gic_r_read(sc, 4, GICR_WAKER);
1121 /* Wake up Re-Distributor for this CPU */
1122 waker &= ~GICR_WAKER_PS;
1123 gic_r_write(sc, 4, GICR_WAKER, waker);
1124 /*
1125 * When clearing ProcessorSleep bit it is required to wait for
1126 * ChildrenAsleep to become zero following the processor power-on.
1127 */
1128 while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1129 DELAY(1);
1130 if (us_left-- == 0) {
1131 panic("Could not wake Re-Distributor for CPU%u",
1132 PCPU_GET(cpuid));
1133 }
1134 }
1135
1136 if (bootverbose) {
1137 device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1138 PCPU_GET(cpuid));
1139 }
1140
1141 return (0);
1142 }
1143
/* Per-CPU Re-Distributor initialization (runs on every CPU). */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Disable PPIs (the mask covers the PPI bits of ICENABLER0). */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/* Set priority for SGIs and PPIs */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
Cache object: 01f09b02682ed32dd9673bdfe776f4d6
|