1 /*-
2 * Copyright (c) 2015-2016 The FreeBSD Foundation
3 * All rights reserved.
4 *
5 * This software was developed by Andrew Turner under
6 * the sponsorship of the FreeBSD Foundation.
7 *
8 * This software was developed by Semihalf under
9 * the sponsorship of the FreeBSD Foundation.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include "opt_platform.h"
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD: releng/11.0/sys/arm64/arm64/gic_v3.c 301453 2016-06-05 16:20:12Z skra $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/rman.h>
47 #include <sys/pcpu.h>
48 #include <sys/proc.h>
49 #include <sys/cpuset.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/smp.h>
53
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56
57 #include <machine/bus.h>
58 #include <machine/cpu.h>
59 #include <machine/intr.h>
60
61 #ifdef FDT
62 #include <dev/ofw/ofw_bus_subr.h>
63 #endif
64
65 #include "pic_if.h"
66
67 #include "gic_v3_reg.h"
68 #include "gic_v3_var.h"
69
70 static bus_read_ivar_t gic_v3_read_ivar;
71
72 #ifdef INTRNG
73 static pic_disable_intr_t gic_v3_disable_intr;
74 static pic_enable_intr_t gic_v3_enable_intr;
75 static pic_map_intr_t gic_v3_map_intr;
76 static pic_setup_intr_t gic_v3_setup_intr;
77 static pic_teardown_intr_t gic_v3_teardown_intr;
78 static pic_post_filter_t gic_v3_post_filter;
79 static pic_post_ithread_t gic_v3_post_ithread;
80 static pic_pre_ithread_t gic_v3_pre_ithread;
81 static pic_bind_intr_t gic_v3_bind_intr;
82 #ifdef SMP
83 static pic_init_secondary_t gic_v3_init_secondary;
84 static pic_ipi_send_t gic_v3_ipi_send;
85 static pic_ipi_setup_t gic_v3_ipi_setup;
86 #endif
87
88 static u_int gic_irq_cpu;
89 #ifdef SMP
90 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
91 static u_int sgi_first_unused = GIC_FIRST_SGI;
92 #endif
93 #else
94 /* Device and PIC methods */
95 static int gic_v3_bind(device_t, u_int, u_int);
96 static void gic_v3_dispatch(device_t, struct trapframe *);
97 static void gic_v3_eoi(device_t, u_int);
98 static void gic_v3_mask_irq(device_t, u_int);
99 static void gic_v3_unmask_irq(device_t, u_int);
100 #ifdef SMP
101 static void gic_v3_init_secondary(device_t);
102 static void gic_v3_ipi_send(device_t, cpuset_t, u_int);
103 #endif
104 #endif
105
/*
 * Kernel-object method table for the GICv3 driver.  Two interrupt
 * frameworks are supported at compile time: the newer INTRNG PIC
 * interface and the legacy arm PIC interface.
 */
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),

#ifdef INTRNG
	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif
#else
	/* PIC interface */
	DEVMETHOD(pic_bind,		gic_v3_bind),
	DEVMETHOD(pic_dispatch,		gic_v3_dispatch),
	DEVMETHOD(pic_eoi,		gic_v3_eoi),
	DEVMETHOD(pic_mask,		gic_v3_mask_irq),
	DEVMETHOD(pic_unmask,		gic_v3_unmask_irq),
#ifdef SMP
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
#endif
#endif

	/* End */
	DEVMETHOD_END
};
145
146 DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
147 sizeof(struct gic_v3_softc));
148
149 /*
150 * Driver-specific definitions.
151 */
152 MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
153
154 /*
155 * Helper functions and definitions.
156 */
157 /* Destination registers, either Distributor or Re-Distributor */
158 enum gic_v3_xdist {
159 DIST = 0,
160 REDIST,
161 };
162
163 /* Helper routines starting with gic_v3_ */
164 static int gic_v3_dist_init(struct gic_v3_softc *);
165 static int gic_v3_redist_alloc(struct gic_v3_softc *);
166 static int gic_v3_redist_find(struct gic_v3_softc *);
167 static int gic_v3_redist_init(struct gic_v3_softc *);
168 static int gic_v3_cpu_init(struct gic_v3_softc *);
169 static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
170
/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/*
 * Primary CPU initialization sequence.  Run once from gic_v3_attach():
 * Distributor first, then Re-Distributor discovery/init, finally the
 * CPU interface of the boot CPU.  NULL-terminated.
 */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};

#ifdef SMP
/*
 * Secondary CPU initialization sequence.  The Distributor is global
 * and already set up, so only the per-CPU Re-Distributor and CPU
 * interface need initialization here.  NULL-terminated.
 */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};
#endif
190
191 #ifdef INTRNG
192 uint32_t
193 gic_r_read_4(device_t dev, bus_size_t offset)
194 {
195 struct gic_v3_softc *sc;
196
197 sc = device_get_softc(dev);
198 return (bus_read_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
199 }
200
201 uint64_t
202 gic_r_read_8(device_t dev, bus_size_t offset)
203 {
204 struct gic_v3_softc *sc;
205
206 sc = device_get_softc(dev);
207 return (bus_read_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
208 }
209
210 void
211 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
212 {
213 struct gic_v3_softc *sc;
214
215 sc = device_get_softc(dev);
216 bus_write_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
217 }
218
219 void
220 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
221 {
222 struct gic_v3_softc *sc;
223
224 sc = device_get_softc(dev);
225 bus_write_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
226 }
227 #endif
228
229 /*
230 * Device interface.
231 */
232 int
233 gic_v3_attach(device_t dev)
234 {
235 struct gic_v3_softc *sc;
236 gic_v3_initseq_t *init_func;
237 uint32_t typer;
238 int rid;
239 int err;
240 size_t i;
241 #ifdef INTRNG
242 u_int irq;
243 const char *name;
244 #endif
245
246 sc = device_get_softc(dev);
247 sc->gic_registered = FALSE;
248 sc->dev = dev;
249 err = 0;
250
251 /* Initialize mutex */
252 mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
253
254 /*
255 * Allocate array of struct resource.
256 * One entry for Distributor and all remaining for Re-Distributor.
257 */
258 sc->gic_res = malloc(
259 sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
260 M_GIC_V3, M_WAITOK);
261
262 /* Now allocate corresponding resources */
263 for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
264 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
265 &rid, RF_ACTIVE);
266 if (sc->gic_res[rid] == NULL)
267 return (ENXIO);
268 }
269
270 /*
271 * Distributor interface
272 */
273 sc->gic_dist = sc->gic_res[0];
274
275 /*
276 * Re-Dristributor interface
277 */
278 /* Allocate space under region descriptions */
279 sc->gic_redists.regions = malloc(
280 sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
281 M_GIC_V3, M_WAITOK);
282
283 /* Fill-up bus_space information for each region. */
284 for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
285 sc->gic_redists.regions[i] = sc->gic_res[rid];
286
287 /* Get the number of supported SPI interrupts */
288 typer = gic_d_read(sc, 4, GICD_TYPER);
289 sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
290 if (sc->gic_nirqs > GIC_I_NUM_MAX)
291 sc->gic_nirqs = GIC_I_NUM_MAX;
292
293 #ifdef INTRNG
294 sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
295 M_GIC_V3, M_WAITOK | M_ZERO);
296 name = device_get_nameunit(dev);
297 for (irq = 0; irq < sc->gic_nirqs; irq++) {
298 struct intr_irqsrc *isrc;
299
300 sc->gic_irqs[irq].gi_irq = irq;
301 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
302 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
303
304 isrc = &sc->gic_irqs[irq].gi_isrc;
305 if (irq <= GIC_LAST_SGI) {
306 err = intr_isrc_register(isrc, sc->dev,
307 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
308 } else if (irq <= GIC_LAST_PPI) {
309 err = intr_isrc_register(isrc, sc->dev,
310 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
311 } else {
312 err = intr_isrc_register(isrc, sc->dev, 0,
313 "%s,s%u", name, irq - GIC_FIRST_SPI);
314 }
315 if (err != 0) {
316 /* XXX call intr_isrc_deregister() */
317 free(sc->gic_irqs, M_DEVBUF);
318 return (err);
319 }
320 }
321 #endif
322
323 /* Get the number of supported interrupt identifier bits */
324 sc->gic_idbits = GICD_TYPER_IDBITS(typer);
325
326 if (bootverbose) {
327 device_printf(dev, "SPIs: %u, IDs: %u\n",
328 sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
329 }
330
331 /* Train init sequence for boot CPU */
332 for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
333 err = (*init_func)(sc);
334 if (err != 0)
335 return (err);
336 }
337 /*
338 * Full success.
339 * Now register PIC to the interrupts handling layer.
340 */
341 #ifndef INTRNG
342 arm_register_root_pic(dev, sc->gic_nirqs);
343 sc->gic_registered = TRUE;
344 #endif
345
346 return (0);
347 }
348
349 int
350 gic_v3_detach(device_t dev)
351 {
352 struct gic_v3_softc *sc;
353 size_t i;
354 int rid;
355
356 sc = device_get_softc(dev);
357
358 if (device_is_attached(dev)) {
359 /*
360 * XXX: We should probably deregister PIC
361 */
362 if (sc->gic_registered)
363 panic("Trying to detach registered PIC");
364 }
365 for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
366 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
367
368 for (i = 0; i < mp_ncpus; i++)
369 free(sc->gic_redists.pcpu[i], M_GIC_V3);
370
371 free(sc->gic_res, M_GIC_V3);
372 free(sc->gic_redists.regions, M_GIC_V3);
373
374 return (0);
375 }
376
377 static int
378 gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
379 {
380 struct gic_v3_softc *sc;
381
382 sc = device_get_softc(dev);
383
384 switch (which) {
385 case GICV3_IVAR_NIRQS:
386 *result = sc->gic_nirqs;
387 return (0);
388 case GICV3_IVAR_REDIST_VADDR:
389 *result = (uintptr_t)rman_get_virtual(
390 sc->gic_redists.pcpu[PCPU_GET(cpuid)]);
391 return (0);
392 }
393
394 return (ENOENT);
395 }
396
397 #ifdef INTRNG
398 int
399 arm_gic_v3_intr(void *arg)
400 {
401 struct gic_v3_softc *sc = arg;
402 struct gic_v3_irqsrc *gi;
403 struct intr_pic *pic;
404 uint64_t active_irq;
405 struct trapframe *tf;
406 bool first;
407
408 first = true;
409 pic = sc->gic_pic;
410
411 while (1) {
412 if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
413 /*
414 * Hardware: Cavium ThunderX
415 * Chip revision: Pass 1.0 (early version)
416 * Pass 1.1 (production)
417 * ERRATUM: 22978, 23154
418 */
419 __asm __volatile(
420 "nop;nop;nop;nop;nop;nop;nop;nop; \n"
421 "mrs %0, ICC_IAR1_EL1 \n"
422 "nop;nop;nop;nop; \n"
423 "dsb sy \n"
424 : "=&r" (active_irq));
425 } else {
426 active_irq = gic_icc_read(IAR1);
427 }
428
429 if (active_irq >= GIC_FIRST_LPI) {
430 intr_child_irq_handler(pic, active_irq);
431 continue;
432 }
433
434 if (__predict_false(active_irq >= sc->gic_nirqs))
435 return (FILTER_HANDLED);
436
437 tf = curthread->td_intr_frame;
438 gi = &sc->gic_irqs[active_irq];
439 if (active_irq <= GIC_LAST_SGI) {
440 /* Call EOI for all IPI before dispatch. */
441 gic_icc_write(EOIR1, (uint64_t)active_irq);
442 #ifdef SMP
443 intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
444 #else
445 device_printf(sc->dev, "SGI %u on UP system detected\n",
446 active_irq - GIC_FIRST_SGI);
447 #endif
448 } else if (active_irq >= GIC_FIRST_PPI &&
449 active_irq <= GIC_LAST_SPI) {
450 if (gi->gi_pol == INTR_TRIGGER_EDGE)
451 gic_icc_write(EOIR1, gi->gi_irq);
452
453 if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
454 if (gi->gi_pol != INTR_TRIGGER_EDGE)
455 gic_icc_write(EOIR1, gi->gi_irq);
456 gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
457 device_printf(sc->dev,
458 "Stray irq %lu disabled\n", active_irq);
459 }
460 }
461 }
462 }
463
464 #ifdef FDT
465 static int
466 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
467 enum intr_polarity *polp, enum intr_trigger *trigp)
468 {
469 u_int irq;
470
471 if (ncells < 3)
472 return (EINVAL);
473
474 /*
475 * The 1st cell is the interrupt type:
476 * 0 = SPI
477 * 1 = PPI
478 * The 2nd cell contains the interrupt number:
479 * [0 - 987] for SPI
480 * [0 - 15] for PPI
481 * The 3rd cell is the flags, encoded as follows:
482 * bits[3:0] trigger type and level flags
483 * 1 = edge triggered
484 * 2 = edge triggered (PPI only)
485 * 4 = level-sensitive
486 * 8 = level-sensitive (PPI only)
487 */
488 switch (cells[0]) {
489 case 0:
490 irq = GIC_FIRST_SPI + cells[1];
491 /* SPI irq is checked later. */
492 break;
493 case 1:
494 irq = GIC_FIRST_PPI + cells[1];
495 if (irq > GIC_LAST_PPI) {
496 device_printf(dev, "unsupported PPI interrupt "
497 "number %u\n", cells[1]);
498 return (EINVAL);
499 }
500 break;
501 default:
502 device_printf(dev, "unsupported interrupt type "
503 "configuration %u\n", cells[0]);
504 return (EINVAL);
505 }
506
507 switch (cells[2] & 0xf) {
508 case 1:
509 *trigp = INTR_TRIGGER_EDGE;
510 *polp = INTR_POLARITY_HIGH;
511 break;
512 case 2:
513 *trigp = INTR_TRIGGER_EDGE;
514 *polp = INTR_POLARITY_LOW;
515 break;
516 case 4:
517 *trigp = INTR_TRIGGER_LEVEL;
518 *polp = INTR_POLARITY_HIGH;
519 break;
520 case 8:
521 *trigp = INTR_TRIGGER_LEVEL;
522 *polp = INTR_POLARITY_LOW;
523 break;
524 default:
525 device_printf(dev, "unsupported trigger/polarity "
526 "configuration 0x%02x\n", cells[2]);
527 return (EINVAL);
528 }
529
530 /* Check the interrupt is valid */
531 if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
532 return (EINVAL);
533
534 *irqp = irq;
535 return (0);
536 }
537 #endif
538
539 static int
540 do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
541 enum intr_polarity *polp, enum intr_trigger *trigp)
542 {
543 struct gic_v3_softc *sc;
544 enum intr_polarity pol;
545 enum intr_trigger trig;
546 #ifdef FDT
547 struct intr_map_data_fdt *daf;
548 #endif
549 u_int irq;
550
551 sc = device_get_softc(dev);
552
553 switch (data->type) {
554 #ifdef FDT
555 case INTR_MAP_DATA_FDT:
556 daf = (struct intr_map_data_fdt *)data;
557 if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
558 &trig) != 0)
559 return (EINVAL);
560 break;
561 #endif
562 default:
563 return (EINVAL);
564 }
565
566 if (irq >= sc->gic_nirqs)
567 return (EINVAL);
568 switch (pol) {
569 case INTR_POLARITY_CONFORM:
570 case INTR_POLARITY_LOW:
571 case INTR_POLARITY_HIGH:
572 break;
573 default:
574 return (EINVAL);
575 }
576 switch (trig) {
577 case INTR_TRIGGER_CONFORM:
578 case INTR_TRIGGER_EDGE:
579 case INTR_TRIGGER_LEVEL:
580 break;
581 default:
582 return (EINVAL);
583 }
584
585 *irqp = irq;
586 if (polp != NULL)
587 *polp = pol;
588 if (trigp != NULL)
589 *trigp = trig;
590 return (0);
591 }
592
593 static int
594 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
595 struct intr_irqsrc **isrcp)
596 {
597 struct gic_v3_softc *sc;
598 int error;
599 u_int irq;
600
601 error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
602 if (error == 0) {
603 sc = device_get_softc(dev);
604 *isrcp = GIC_INTR_ISRC(sc, irq);
605 }
606 return (error);
607 }
608
/*
 * pic_setup_intr method: validate and record the requested
 * polarity/trigger configuration, program the ICFGR register of
 * the Re-Distributor (SGIs/PPIs) or Distributor (SPIs), and route
 * SPIs to a CPU.  A second setup on an already-active source must
 * match the existing configuration.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;
	uint32_t reg;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* The map data must match this irqsrc and be fully specified. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	gi->gi_pol = pol;
	gi->gi_trig = trig;

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		mtx_lock_spin(&sc->gic_mtx);

		/* Set the trigger and polarity */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/*
		 * Each interrupt has a 2-bit ICFGR field (16 per register);
		 * bit 1 of the field selects edge (set) vs. level (clear).
		 */
		if (trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}

		mtx_unlock_spin(&sc->gic_mtx);

		/* Route SPIs (and only SPIs) to a CPU. */
		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
683
684 static int
685 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
686 struct resource *res, struct intr_map_data *data)
687 {
688 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
689
690 if (isrc->isrc_handlers == 0) {
691 gi->gi_pol = INTR_POLARITY_CONFORM;
692 gi->gi_trig = INTR_TRIGGER_CONFORM;
693 }
694
695 return (0);
696 }
697
698 static void
699 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
700 {
701 struct gic_v3_softc *sc;
702 struct gic_v3_irqsrc *gi;
703 u_int irq;
704
705 sc = device_get_softc(dev);
706 gi = (struct gic_v3_irqsrc *)isrc;
707 irq = gi->gi_irq;
708
709 if (irq <= GIC_LAST_PPI) {
710 /* SGIs and PPIs in corresponding Re-Distributor */
711 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
712 GICD_I_MASK(irq));
713 gic_v3_wait_for_rwp(sc, REDIST);
714 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
715 /* SPIs in distributor */
716 gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
717 gic_v3_wait_for_rwp(sc, DIST);
718 } else
719 panic("%s: Unsupported IRQ %u", __func__, irq);
720 }
721
722 static void
723 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
724 {
725 struct gic_v3_softc *sc;
726 struct gic_v3_irqsrc *gi;
727 u_int irq;
728
729 sc = device_get_softc(dev);
730 gi = (struct gic_v3_irqsrc *)isrc;
731 irq = gi->gi_irq;
732
733 if (irq <= GIC_LAST_PPI) {
734 /* SGIs and PPIs in corresponding Re-Distributor */
735 gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
736 GICD_I_MASK(irq));
737 gic_v3_wait_for_rwp(sc, REDIST);
738 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
739 /* SPIs in distributor */
740 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
741 gic_v3_wait_for_rwp(sc, DIST);
742 } else
743 panic("%s: Unsupported IRQ %u", __func__, irq);
744 }
745
746 static void
747 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
748 {
749 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
750
751 gic_v3_disable_intr(dev, isrc);
752 gic_icc_write(EOIR1, gi->gi_irq);
753 }
754
755 static void
756 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
757 {
758
759 gic_v3_enable_intr(dev, isrc);
760 }
761
762 static void
763 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
764 {
765 struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
766
767 if (gi->gi_pol == INTR_TRIGGER_EDGE)
768 return;
769
770 gic_icc_write(EOIR1, gi->gi_irq);
771 }
772
773 static int
774 gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
775 {
776 struct gic_v3_softc *sc;
777 struct gic_v3_irqsrc *gi;
778 int cpu;
779
780 gi = (struct gic_v3_irqsrc *)isrc;
781 if (gi->gi_irq <= GIC_LAST_PPI)
782 return (EINVAL);
783
784 KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
785 ("%s: Attempting to bind an invalid IRQ", __func__));
786
787 sc = device_get_softc(dev);
788
789 if (CPU_EMPTY(&isrc->isrc_cpu)) {
790 gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
791 CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
792 gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
793 CPU_AFFINITY(gic_irq_cpu));
794 } else {
795 /*
796 * We can only bind to a single CPU so select
797 * the first CPU found.
798 */
799 cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
800 gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
801 }
802
803 return (0);
804 }
805
806 #ifdef SMP
/*
 * pic_init_secondary method (INTRNG): bring up the GIC on an
 * application (non-boot) CPU.  Runs the secondary init sequence
 * (Re-Distributor + CPU interface), unmasks SGIs/PPIs that already
 * have handlers bound on this CPU, and forwards the call to any
 * child PICs (e.g. ITS).
 */
static void
gic_v3_init_secondary(device_t dev)
{
	device_t child;
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	struct intr_irqsrc *isrc;
	u_int cpu, irq;
	int err, i;

	sc = device_get_softc(dev);
	cpu = PCPU_GET(cpuid);

	/* Run the init sequence for this secondary CPU. */
	for (init_func = gic_v3_secondary_init; *init_func != NULL;
	    init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n", cpu);
			return;
		}
	}

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Let child controllers (e.g. ITS) do their per-CPU setup. */
	for (i = 0; i < sc->gic_nchildren; i++) {
		child = sc->gic_children[i];
		PIC_INIT_SECONDARY(child);
	}
}
850
/*
 * pic_ipi_send method (INTRNG): deliver an SGI to all CPUs in "cpus"
 * via ICC_SGI1R_EL1.  One SGI1R write can only target CPUs sharing
 * the same AFF3:AFF2:AFF1 affinity, with a 16-bit target list of
 * AFF0 values, so the CPU set is sent in per-affinity-group batches.
 * NOTE(review): assumes CPUs with equal upper affinity appear
 * consecutively in CPU id order -- confirm against CPU enumeration.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i < mp_ncpus; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Send the IPI */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
898
899 static int
900 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
901 {
902 struct intr_irqsrc *isrc;
903 struct gic_v3_softc *sc = device_get_softc(dev);
904
905 if (sgi_first_unused > GIC_LAST_SGI)
906 return (ENOSPC);
907
908 isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
909 sgi_to_ipi[sgi_first_unused++] = ipi;
910
911 CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
912
913 *isrcp = isrc;
914 return (0);
915 }
916 #endif /* SMP */
917 #else /* INTRNG */
918 /*
919 * PIC interface.
920 */
921
922 static int
923 gic_v3_bind(device_t dev, u_int irq, u_int cpuid)
924 {
925 uint64_t aff;
926 struct gic_v3_softc *sc;
927
928 sc = device_get_softc(dev);
929
930 if (irq <= GIC_LAST_PPI) {
931 /* Can't bind PPI to another CPU but it's not an error */
932 return (0);
933 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
934 aff = CPU_AFFINITY(cpuid);
935 gic_d_write(sc, 4, GICD_IROUTER(irq), aff);
936 return (0);
937 } else if (irq >= GIC_FIRST_LPI)
938 return (lpi_migrate(dev, irq, cpuid));
939
940 return (EINVAL);
941 }
942
/*
 * Legacy pic_dispatch method: acknowledge and dispatch pending
 * interrupts until ICC_IAR1_EL1 reports the spurious ID.  SGIs are
 * EOId immediately before dispatch; PPIs/SPIs/LPIs are handed to
 * arm_dispatch_intr() and EOId later via gic_v3_eoi().
 */
static void
gic_v3_dispatch(device_t dev, struct trapframe *frame)
{
	uint64_t active_irq;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		/* No interrupt pending: done. */
		if (__predict_false(active_irq == ICC_IAR1_EL1_SPUR))
			break;

		if (__predict_true((active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) ||
		    active_irq >= GIC_FIRST_LPI)) {
			arm_dispatch_intr(active_irq, frame);
			continue;
		}

		if (active_irq <= GIC_LAST_SGI) {
			/* EOI SGIs up front; IPI handlers do not EOI. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
			arm_dispatch_intr(active_irq, frame);
			continue;
		}
	}
}
982
983 static void
984 gic_v3_eoi(device_t dev, u_int irq)
985 {
986
987 gic_icc_write(EOIR1, (uint64_t)irq);
988 }
989
990 static void
991 gic_v3_mask_irq(device_t dev, u_int irq)
992 {
993 struct gic_v3_softc *sc;
994
995 sc = device_get_softc(dev);
996
997 if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */
998 gic_r_write(sc, 4,
999 GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq));
1000 gic_v3_wait_for_rwp(sc, REDIST);
1001 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */
1002 gic_r_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
1003 gic_v3_wait_for_rwp(sc, DIST);
1004 } else if (irq >= GIC_FIRST_LPI) { /* LPIs */
1005 lpi_mask_irq(dev, irq);
1006 } else
1007 panic("%s: Unsupported IRQ number %u", __func__, irq);
1008 }
1009
1010 static void
1011 gic_v3_unmask_irq(device_t dev, u_int irq)
1012 {
1013 struct gic_v3_softc *sc;
1014
1015 sc = device_get_softc(dev);
1016
1017 if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */
1018 gic_r_write(sc, 4,
1019 GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq));
1020 gic_v3_wait_for_rwp(sc, REDIST);
1021 } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */
1022 gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
1023 gic_v3_wait_for_rwp(sc, DIST);
1024 } else if (irq >= GIC_FIRST_LPI) { /* LPIs */
1025 lpi_unmask_irq(dev, irq);
1026 } else
1027 panic("%s: Unsupported IRQ number %u", __func__, irq);
1028 }
1029
1030 #ifdef SMP
/*
 * Legacy pic_init_secondary method: bring up the GIC on an
 * application (non-boot) CPU -- Re-Distributor, CPU interface, ITS
 * (best effort), plus a temporary hardcoded unmask of the timer
 * PPIs.
 */
static void
gic_v3_init_secondary(device_t dev)
{
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	int err;

	sc = device_get_softc(dev);

	/* Run the init sequence for this secondary CPU. */
	for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n",
			    PCPU_GET(cpuid));
			return;
		}
	}

	/*
	 * Try to initialize ITS.
	 * If there is no driver attached this routine will fail but that
	 * does not mean failure here as only LPIs will not be functional
	 * on the current CPU.
	 */
	if (its_init_cpu(NULL) != 0) {
		device_printf(dev,
		    "Could not initialize ITS for CPU%u. "
		    "No LPIs will arrive on this CPU\n",
		    PCPU_GET(cpuid));
	}

	/*
	 * ARM64TODO:	Unmask timer PPIs. To be removed when appropriate
	 *		mechanism is implemented.
	 *		Activate the timer interrupts: virtual (27), secure (29),
	 *		and non-secure (30). Use hardcoded values here as there
	 *		should be no defines for them.
	 */
	gic_v3_unmask_irq(dev, 27);
	gic_v3_unmask_irq(dev, 29);
	gic_v3_unmask_irq(dev, 30);
}
1075
/*
 * Legacy pic_ipi_send method: deliver SGI "ipi" to every CPU in
 * "cpuset" via ICC_SGI1R_EL1.  A single SGI1R write only reaches
 * CPUs that share AFF3:AFF2:AFF1, so the set is processed in
 * affinity-group batches until it is empty.
 */
static void
gic_v3_ipi_send(device_t dev, cpuset_t cpuset, u_int ipi)
{
	u_int cpu;
	uint64_t aff, tlist;
	uint64_t val;
	uint64_t aff_mask;

	/* Set affinity mask to match level 3, 2 and 1 */
	aff_mask = CPU_AFF1_MASK | CPU_AFF2_MASK | CPU_AFF3_MASK;

	/* Iterate through all CPUs in set */
	while (!CPU_EMPTY(&cpuset)) {
		aff = tlist = 0;
		for (cpu = 0; cpu < mp_ncpus; cpu++) {
			/* Compose target list for single AFF3:AFF2:AFF1 set */
			if (CPU_ISSET(cpu, &cpuset)) {
				if (!tlist) {
					/*
					 * Save affinity of the first CPU to
					 * send IPI to for later comparison.
					 */
					aff = CPU_AFFINITY(cpu);
					tlist |= (1UL << CPU_AFF0(aff));
					CPU_CLR(cpu, &cpuset);
				}
				/* Check for same Affinity level 3, 2 and 1 */
				if ((aff & aff_mask) == (CPU_AFFINITY(cpu) & aff_mask)) {
					tlist |= (1UL << CPU_AFF0(CPU_AFFINITY(cpu)));
					/* Clear CPU in cpuset from target list */
					CPU_CLR(cpu, &cpuset);
				}
			}
		}
		if (tlist) {
			KASSERT((tlist & ~ICC_SGI1R_EL1_TL_MASK) == 0,
			    ("Target list too long for GICv3 IPI"));
			/* Send SGI to CPUs in target list */
			val = tlist;
			val |= (uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT;
			val |= (uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT;
			val |= (uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT;
			val |= (uint64_t)(ipi & ICC_SGI1R_EL1_SGIID_MASK) <<
			    ICC_SGI1R_EL1_SGIID_SHIFT;
			gic_icc_write(SGI1R, val);
		}
	}
}
1124 #endif
1125 #endif /* !INTRNG */
1126
1127 /*
1128 * Helper routines
1129 */
/*
 * Spin until the targeted Distributor or current CPU's Re-Distributor
 * reports no register write pending (RWP bit in CTLR).  Panics after
 * roughly one second of waiting.
 *
 * NOTE(review): the REDIST case also reads at the GICD_CTLR offset;
 * this relies on GICR_CTLR sharing the same offset and RWP bit
 * position -- confirm against the GICv3 architecture spec.
 */
static void
gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
{
	struct resource *res;
	u_int cpuid;
	size_t us_left = 1000000;	/* ~1 s timeout at 1 us per poll */

	cpuid = PCPU_GET(cpuid);

	switch (xdist) {
	case DIST:
		res = sc->gic_dist;
		break;
	case REDIST:
		res = sc->gic_redists.pcpu[cpuid];
		break;
	default:
		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
		return;
	}

	while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
		DELAY(1);
		if (us_left-- == 0)
			panic("GICD Register write pending for too long");
	}
}
1157
1158 /* CPU interface. */
1159 static __inline void
1160 gic_v3_cpu_priority(uint64_t mask)
1161 {
1162
1163 /* Set prority mask */
1164 gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
1165 }
1166
/*
 * Enable system-register access to the GIC CPU interface
 * (ICC_SRE_EL1.SRE).  The bit may be forced to zero by EL2; in that
 * case report failure, as this driver cannot operate through the
 * legacy memory-mapped interface.  Returns 0 on success, ENXIO if
 * the bit cannot be set.
 */
static int
gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
{
	uint64_t sre;
	u_int cpuid;

	cpuid = PCPU_GET(cpuid);
	/*
	 * Set the SRE bit to enable access to GIC CPU interface
	 * via system registers.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	sre |= ICC_SRE_EL1_SRE;
	WRITE_SPECIALREG(icc_sre_el1, sre);
	isb();	/* Synchronize before reading the bit back. */
	/*
	 * Now ensure that the bit is set.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	if ((sre & ICC_SRE_EL1_SRE) == 0) {
		/* We are done. This was disabled in EL2 */
		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
		    "via system registers\n", cpuid);
		return (ENXIO);
	} else if (bootverbose) {
		device_printf(sc->dev,
		    "CPU%u enabled CPU interface via system registers\n",
		    cpuid);
	}

	return (0);
}
1199
/*
 * Per-CPU interface initialization: enable system-register access,
 * open the priority mask to accept all interrupts, disable EOI mode
 * and enable Group 1 interrupt signalling.
 *
 * Returns 0 on success or the error from gic_v3_cpu_enable_sre().
 */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable group 1 (insecure) interrupts.
	 * NOTE(review): the ICC_IGRPEN0_EL1_EN constant is applied to
	 * the IGRPEN1 register -- presumably both group-enable bits
	 * share the same position; confirm against the register
	 * definitions.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
1218
1219 /* Distributor */
1220 static int
1221 gic_v3_dist_init(struct gic_v3_softc *sc)
1222 {
1223 uint64_t aff;
1224 u_int i;
1225
1226 /*
1227 * 1. Disable the Distributor
1228 */
1229 gic_d_write(sc, 4, GICD_CTLR, 0);
1230 gic_v3_wait_for_rwp(sc, DIST);
1231
1232 /*
1233 * 2. Configure the Distributor
1234 */
1235 /* Set all global interrupts to be level triggered, active low. */
1236 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
1237 gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);
1238
1239 /* Set priority to all shared interrupts */
1240 for (i = GIC_FIRST_SPI;
1241 i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
1242 /* Set highest priority */
1243 gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
1244 }
1245
1246 /*
1247 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
1248 * Re-Distributor registers.
1249 */
1250 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
1251 gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);
1252
1253 gic_v3_wait_for_rwp(sc, DIST);
1254
1255 /*
1256 * 3. Enable Distributor
1257 */
1258 /* Enable Distributor with ARE, Group 1 */
1259 gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
1260 GICD_CTLR_G1);
1261
1262 /*
1263 * 4. Route all interrupts to boot CPU.
1264 */
1265 aff = CPU_AFFINITY(0);
1266 for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
1267 gic_d_write(sc, 4, GICD_IROUTER(i), aff);
1268
1269 return (0);
1270 }
1271
1272 /* Re-Distributor */
1273 static int
1274 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1275 {
1276 u_int cpuid;
1277
1278 /* Allocate struct resource for all CPU's Re-Distributor registers */
1279 for (cpuid = 0; cpuid < mp_ncpus; cpuid++)
1280 if (CPU_ISSET(cpuid, &all_cpus) != 0)
1281 sc->gic_redists.pcpu[cpuid] =
1282 malloc(sizeof(*sc->gic_redists.pcpu[0]),
1283 M_GIC_V3, M_WAITOK);
1284 else
1285 sc->gic_redists.pcpu[cpuid] = NULL;
1286 return (0);
1287 }
1288
/*
 * Locate this CPU's Re-Distributor frame by walking every configured
 * region and comparing each frame's GICR_TYPER affinity field against
 * the CPU's own affinity.  On a match, a copy of the resource (with
 * the bus handle advanced to the matching frame) is stored in
 * sc->gic_redists.pcpu[cpuid].
 *
 * Returns 0 on success, ENODEV when a region does not identify as a
 * GICv3/GICv4 Re-Distributor, or ENXIO when no frame matches.
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource r_res;
	bus_space_handle_t r_bsh;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = *sc->gic_redists.regions[i];
		r_bsh = rman_get_bushandle(&r_res);

		/* Reject regions that are not GICv3/GICv4 frames. */
		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
		switch (pidr2 & GICR_PIDR2_ARCH_MASK) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Walk successive frames until the one marked LAST. */
		do {
			typer = bus_read_8(&r_res, GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				*sc->gic_redists.pcpu[cpuid] = r_res;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/*
			 * Advance to the next frame: RD + SGI pages,
			 * plus the VLPI pages when the frame reports
			 * VLPI support.
			 */
			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				r_bsh +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}

			rman_set_bushandle(&r_res, r_bsh);
		} while ((typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1356
1357 static int
1358 gic_v3_redist_wake(struct gic_v3_softc *sc)
1359 {
1360 uint32_t waker;
1361 size_t us_left = 1000000;
1362
1363 waker = gic_r_read(sc, 4, GICR_WAKER);
1364 /* Wake up Re-Distributor for this CPU */
1365 waker &= ~GICR_WAKER_PS;
1366 gic_r_write(sc, 4, GICR_WAKER, waker);
1367 /*
1368 * When clearing ProcessorSleep bit it is required to wait for
1369 * ChildrenAsleep to become zero following the processor power-on.
1370 */
1371 while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1372 DELAY(1);
1373 if (us_left-- == 0) {
1374 panic("Could not wake Re-Distributor for CPU%u",
1375 PCPU_GET(cpuid));
1376 }
1377 }
1378
1379 if (bootverbose) {
1380 device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1381 PCPU_GET(cpuid));
1382 }
1383
1384 return (0);
1385 }
1386
/*
 * Per-CPU Re-Distributor bring-up: locate this CPU's frame, wake it,
 * then program the default enable state and priorities of the private
 * (SGI/PPI) interrupts.  Returns 0 on success or the error from the
 * find/wake steps.
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Disable PPIs (the written mask covers the PPI bit range) */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/*
	 * Set highest priority for all SGIs and PPIs.
	 * NOTE(review): the Distributor's GICD_IPRIORITYR macro is
	 * reused against the Re-Distributor SGI frame -- presumably the
	 * register layout is identical; confirm against the GICR
	 * definitions.
	 */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
/* (removed: web-cache extraction artifact, not part of the source) */