/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright 2019 Justin Hibbits
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#ifdef POWERNV
#include <powerpc/powernv/opal.h>
#endif

#include "pic_if.h"

#define	XIVE_PRIORITY	7	/* Random non-zero number */
#define	MAX_XIVE_IRQS	(1<<24)	/* 24-bit XIRR field */

/* Registers */
#define	XIVE_TM_QW1_OS		0x010	/* Guest OS registers */
#define	XIVE_TM_QW2_HV_POOL	0x020	/* Hypervisor pool registers */
#define	XIVE_TM_QW3_HV		0x030	/* Hypervisor registers */

#define	XIVE_TM_NSR	0x00
#define	XIVE_TM_CPPR	0x01
#define	XIVE_TM_IPB	0x02
#define	XIVE_TM_LSMFB	0x03
#define	XIVE_TM_ACK_CNT	0x04
#define	XIVE_TM_INC	0x05
#define	XIVE_TM_AGE	0x06
#define	XIVE_TM_PIPR	0x07

#define	TM_WORD0	0x0
#define	TM_WORD2	0x8
#define	  TM_QW2W2_VP	0x80000000

#define	XIVE_TM_SPC_ACK			0x800
#define	  TM_QW3NSR_HE_SHIFT		14
#define	  TM_QW3_NSR_HE_NONE		0
#define	  TM_QW3_NSR_HE_POOL		1
#define	  TM_QW3_NSR_HE_PHYS		2
#define	  TM_QW3_NSR_HE_LSI		3
#define	XIVE_TM_SPC_PULL_POOL_CTX	0x828

#define	XIVE_IRQ_LOAD_EOI	0x000
#define	XIVE_IRQ_STORE_EOI	0x400
#define	XIVE_IRQ_PQ_00		0xc00
#define	XIVE_IRQ_PQ_01		0xd00

#define	XIVE_IRQ_VAL_P		0x02
#define	XIVE_IRQ_VAL_Q		0x01
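
/*
 * The ESB (Event State Buffer) of each interrupt holds two state bits: P
 * ("pending", set while an event is presented) and Q ("queued", set if a
 * further event arrives while P is set).  A load from the PQ_00 or PQ_01
 * offset above atomically sets PQ to 00 (reset/unmasked) or 01 (masked) and
 * returns the previous PQ value in the VAL_P/VAL_Q bits; xive_eoi() uses the
 * returned Q bit to decide whether a coalesced event must be re-triggered.
 */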

struct xive_softc;
struct xive_irq;

extern void (*powernv_smp_ap_extra_init)(void);

/* Private support */
static void	xive_setup_cpu(void);
static void	xive_smp_cpu_startup(void);
static void	xive_init_irq(struct xive_irq *irqd, u_int irq);
static struct xive_irq	*xive_configure_irq(u_int irq);
static int	xive_provision_page(struct xive_softc *sc);

/* Interfaces */
static int	xive_probe(device_t);
static int	xive_attach(device_t);
static int	xics_probe(device_t);
static int	xics_attach(device_t);

static void	xive_bind(device_t, u_int, cpuset_t, void **);
static void	xive_dispatch(device_t, struct trapframe *);
static void	xive_enable(device_t, u_int, u_int, void **);
static void	xive_eoi(device_t, u_int, void *);
static void	xive_ipi(device_t, u_int);
static void	xive_mask(device_t, u_int, void *);
static void	xive_unmask(device_t, u_int, void *);
static void	xive_translate_code(device_t dev, u_int irq, int code,
		    enum intr_trigger *trig, enum intr_polarity *pol);

static device_method_t xive_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xive_probe),
	DEVMETHOD(device_attach,	xive_attach),

	/* PIC interface */
	DEVMETHOD(pic_bind,		xive_bind),
	DEVMETHOD(pic_dispatch,		xive_dispatch),
	DEVMETHOD(pic_enable,		xive_enable),
	DEVMETHOD(pic_eoi,		xive_eoi),
	DEVMETHOD(pic_ipi,		xive_ipi),
	DEVMETHOD(pic_mask,		xive_mask),
	DEVMETHOD(pic_unmask,		xive_unmask),
	DEVMETHOD(pic_translate_code,	xive_translate_code),

	DEVMETHOD_END
};

static device_method_t xics_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xics_probe),
	DEVMETHOD(device_attach,	xics_attach),

	DEVMETHOD_END
};

struct xive_softc {
	struct mtx	sc_mtx;
	struct resource	*sc_mem;
	vm_size_t	sc_prov_page_size;
	uint32_t	sc_offset;
};

struct xive_queue {
	uint32_t	*q_page;
	uint32_t	*q_eoi_page;
	uint32_t	q_toggle;
	uint32_t	q_size;
	uint32_t	q_index;
	uint32_t	q_mask;
};

struct xive_irq {
	uint32_t	girq;
	uint32_t	lirq;
	uint64_t	vp;
	uint64_t	flags;
#define	OPAL_XIVE_IRQ_SHIFT_BUG		0x00000008
#define	OPAL_XIVE_IRQ_LSI		0x00000004
#define	OPAL_XIVE_IRQ_STORE_EOI		0x00000002
#define	OPAL_XIVE_IRQ_TRIGGER_PAGE	0x00000001
	uint8_t		prio;
	vm_offset_t	eoi_page;
	vm_offset_t	trig_page;
	vm_size_t	esb_size;
	int		chip;
};

struct xive_cpu {
	uint64_t	vp;
	uint64_t	flags;
	struct xive_irq	ipi_data;
	struct xive_queue	queue; /* We only use a single queue for now. */
	uint64_t	cam;
	uint32_t	chip;
};

static driver_t xive_driver = {
	"xive",
	xive_methods,
	sizeof(struct xive_softc)
};

static driver_t xics_driver = {
	"xivevc",
	xics_methods,
	0
};

EARLY_DRIVER_MODULE(xive, ofwbus, xive_driver, 0, 0, BUS_PASS_INTERRUPT - 1);
EARLY_DRIVER_MODULE(xivevc, ofwbus, xics_driver, 0, 0, BUS_PASS_INTERRUPT);

MALLOC_DEFINE(M_XIVE, "xive", "XIVE Memory");

DPCPU_DEFINE_STATIC(struct xive_cpu, xive_cpu_data);

static int xive_ipi_vector = -1;

/*
 * XIVE Exploitation mode driver.
 *
 * The XIVE, present in the POWER9 CPU, can run in two modes: XICS emulation
 * mode, and "Exploitation mode".  XICS emulation mode is compatible with the
 * POWER8 and earlier XICS interrupt controller, using OPAL calls to emulate
 * hypervisor calls and memory accesses.  Exploitation mode gives us raw access
 * to the XIVE MMIO, improving performance significantly.
 *
 * The XIVE controller is a very unusual interrupt controller.  It uses queues
 * in memory to pass interrupts around, and maps itself into 512GB of physical
 * device address space, giving each interrupt in the system one or more pages
 * of address space.  An IRQ is tied to a virtual processor, which could be a
 * physical CPU thread, or a guest CPU thread (LPAR running on a physical
 * thread).  Thus, the controller can route interrupts directly to guest OSes,
 * bypassing processing by the hypervisor and thereby improving performance of
 * the guest OS.
 *
 * An IRQ, in addition to being tied to a virtual processor, has one or two
 * page mappings: an EOI page, and an optional trigger page.  The trigger page
 * can be the same as the EOI page.  Level-sensitive interrupts (LSIs) don't
 * have a trigger page, as they're external interrupts controlled by physical
 * lines.  MSIs and IPIs have trigger pages.  An IPI is really just another IRQ
 * in the XIVE, one that happens to be triggered by software.
 *
 * An interesting behavior of the XIVE controller is that for many operations
 * the data value of an access doesn't actually matter; the direction of the
 * access (load vs. store) and the address are what carry the meaning.  Hence,
 * masking and unmasking an interrupt are done by loading from different
 * addresses in the EOI page, and triggering an interrupt consists of writing
 * to the trigger page.
 *
 * Additionally, the mapped MMIO region is CPU-sensitive, just like the
 * per-processor register space (private access) in OpenPIC.  For a CPU to
 * receive interrupts it must itself configure its CPPR (Current Processor
 * Priority Register); the CPPR cannot be set by any other processor.  This
 * necessitates the xive_smp_cpu_startup() function.
 *
 * Queues are power-of-two-sized pages of memory shared with the XIVE.  The
 * XIVE writes queue entries with an alternating polarity (toggle) bit, which
 * flips each time the queue wraps.
 */

/*
 * Offset-based read/write interfaces.
 */
static uint16_t
xive_read_2(struct xive_softc *sc, bus_size_t offset)
{

	return (bus_read_2(sc->sc_mem, sc->sc_offset + offset));
}

static void
xive_write_1(struct xive_softc *sc, bus_size_t offset, uint8_t val)
{

	bus_write_1(sc->sc_mem, sc->sc_offset + offset, val);
}

/* EOI and Trigger page access interfaces. */
static uint64_t
xive_read_mmap8(vm_offset_t addr)
{
	return (*(volatile uint64_t *)addr);
}

static void
xive_write_mmap8(vm_offset_t addr, uint64_t val)
{
	/* Must be volatile: this is a store to device (ESB) MMIO. */
	*(volatile uint64_t *)(addr) = val;
}

/* Device interfaces. */
static int
xive_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "ibm,opal-xive-pe"))
		return (ENXIO);

	device_set_desc(dev, "External Interrupt Virtualization Engine");

	/* Make sure we always win against the xicp driver. */
	return (BUS_PROBE_SPECIFIC);
}

static int
xics_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "ibm,opal-xive-vc"))
		return (ENXIO);

	device_set_desc(dev, "External Interrupt Virtualization Engine Root");
	return (BUS_PROBE_DEFAULT);
}

static int
xive_attach(device_t dev)
{
	struct xive_softc *sc = device_get_softc(dev);
	struct xive_cpu *xive_cpud;
	phandle_t phandle = ofw_bus_get_node(dev);
	int64_t vp_block;
	int error;
	int rid;
	int i, order;
	uint64_t vp_id;
	int64_t ipi_irq;

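	/* Switch the controller from XICS emulation to exploitation mode. */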
	opal_call(OPAL_XIVE_RESET, OPAL_XIVE_XICS_MODE_EXP);

	error = OF_getencprop(phandle, "ibm,xive-provision-page-size",
	    (pcell_t *)&sc->sc_prov_page_size, sizeof(sc->sc_prov_page_size));

	rid = 1;	/* Get the Hypervisor-level register set. */
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	sc->sc_offset = XIVE_TM_QW3_HV;

	mtx_init(&sc->sc_mtx, "XIVE", NULL, MTX_DEF);

	/* Workaround for qemu single-thread powernv */
	if (mp_maxid == 0)
		order = 1;
	else
		order = fls(mp_maxid + (mp_maxid - 1)) - 1;

	do {
		vp_block = opal_call(OPAL_XIVE_ALLOCATE_VP_BLOCK, order);
		if (vp_block == OPAL_BUSY)
			DELAY(10);
		else if (vp_block == OPAL_XIVE_PROVISIONING)
			xive_provision_page(sc);
		else
			break;
	} while (1);

	if (vp_block < 0) {
		device_printf(dev,
		    "Unable to allocate VP block.  Opal error %d\n",
		    (int)vp_block);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->sc_mem);
		return (ENXIO);
	}

	/*
	 * Set up the VPs.  Try to do as much as we can in attach, to lessen
	 * what's needed at AP spawn time.
	 */
	CPU_FOREACH(i) {
		vp_id = pcpu_find(i)->pc_hwref;

		xive_cpud = DPCPU_ID_PTR(i, xive_cpu_data);
		xive_cpud->vp = vp_id + vp_block;
		opal_call(OPAL_XIVE_GET_VP_INFO, xive_cpud->vp, NULL,
		    vtophys(&xive_cpud->cam), NULL, vtophys(&xive_cpud->chip));

		xive_cpud->cam = be64toh(xive_cpud->cam);
		xive_cpud->chip = be64toh(xive_cpud->chip);

		/* Allocate the queue page and populate the queue state data. */
		xive_cpud->queue.q_page = contigmalloc(PAGE_SIZE, M_XIVE,
		    M_ZERO | M_WAITOK, 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
		xive_cpud->queue.q_size = 1 << PAGE_SHIFT;
		xive_cpud->queue.q_mask =
		    ((xive_cpud->queue.q_size / sizeof(int)) - 1);
		xive_cpud->queue.q_toggle = 0;
		xive_cpud->queue.q_index = 0;
		do {
			error = opal_call(OPAL_XIVE_SET_VP_INFO, xive_cpud->vp,
			    OPAL_XIVE_VP_ENABLED, 0);
		} while (error == OPAL_BUSY);
		error = opal_call(OPAL_XIVE_SET_QUEUE_INFO, vp_id,
		    XIVE_PRIORITY, vtophys(xive_cpud->queue.q_page), PAGE_SHIFT,
		    OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED);

		do {
			ipi_irq = opal_call(OPAL_XIVE_ALLOCATE_IRQ,
			    xive_cpud->chip);
		} while (ipi_irq == OPAL_BUSY);

		if (ipi_irq < 0)
			device_printf(dev,
			    "Failed allocating IPI.  OPAL error %d\n",
			    (int)ipi_irq);
		else {
			xive_init_irq(&xive_cpud->ipi_data, ipi_irq);
			xive_cpud->ipi_data.vp = vp_id;
			xive_cpud->ipi_data.lirq = MAX_XIVE_IRQS;
			opal_call(OPAL_XIVE_SET_IRQ_CONFIG, ipi_irq,
			    xive_cpud->ipi_data.vp, XIVE_PRIORITY,
			    MAX_XIVE_IRQS);
		}
	}

	powerpc_register_pic(dev, OF_xref_from_node(phandle), MAX_XIVE_IRQS,
	    1 /* Number of IPIs */, FALSE);
	root_pic = dev;

	xive_setup_cpu();
	powernv_smp_ap_extra_init = xive_smp_cpu_startup;

	return (0);
}

static int
xics_attach(device_t dev)
{
	phandle_t phandle = ofw_bus_get_node(dev);

	/* The XIVE (root PIC) will handle all our interrupts */
	powerpc_register_pic(root_pic, OF_xref_from_node(phandle),
	    MAX_XIVE_IRQS, 1 /* Number of IPIs */, FALSE);

	return (0);
}

/*
 * PIC I/F methods.
 */

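/*
 * Route an IRQ to one CPU out of the requested set and re-enable it at the
 * XIVE level.
 */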
static void
xive_bind(device_t dev, u_int irq, cpuset_t cpumask, void **priv)
{
	struct xive_irq *irqd;
	int cpu;
	int ncpus, i, error;

	if (*priv == NULL)
		*priv = xive_configure_irq(irq);

	irqd = *priv;

	/*
	 * This doesn't appear to actually support affinity groups, so pick a
	 * random CPU.
	 */
	ncpus = 0;
	CPU_FOREACH(cpu)
		if (CPU_ISSET(cpu, &cpumask))
			ncpus++;

	i = mftb() % ncpus;
	ncpus = 0;
	CPU_FOREACH(cpu) {
		if (!CPU_ISSET(cpu, &cpumask))
			continue;
		if (ncpus == i)
			break;
		ncpus++;
	}

	opal_call(OPAL_XIVE_SYNC, OPAL_XIVE_SYNC_QUEUE, irq);

	irqd->vp = pcpu_find(cpu)->pc_hwref;
	error = opal_call(OPAL_XIVE_SET_IRQ_CONFIG, irq, irqd->vp,
	    XIVE_PRIORITY, irqd->lirq);

	if (error < 0)
		panic("Cannot bind interrupt %d to CPU %d", irq, cpu);

	xive_eoi(dev, irq, irqd);
}

/* Read the next entry in the queue page and update the index. */
static int
xive_read_eq(struct xive_queue *q)
{
	uint32_t i = be32toh(q->q_page[q->q_index]);

	/* Check validity, using current queue polarity. */
	if ((i >> 31) == q->q_toggle)
		return (0);

	q->q_index = (q->q_index + 1) & q->q_mask;

	if (q->q_index == 0)
		q->q_toggle ^= 1;

	return (i & 0x7fffffff);
}

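/*
 * Dispatch interrupts on the current CPU.  Reading the ACK register returns
 * the CPPR in the low byte and an HE field identifying the interrupt's
 * target context (none, pool, physical thread, or LSI); only physical-thread
 * interrupts are expected here.  The event queue is then drained, dispatching
 * each vector found, and the CPPR is finally reset to 0xff to accept all
 * interrupts again.
 */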
static void
xive_dispatch(device_t dev, struct trapframe *tf)
{
	struct xive_softc *sc;
	struct xive_cpu *xive_cpud;
	uint32_t vector;
	uint16_t ack;
	uint8_t cppr, he;

	sc = device_get_softc(dev);

	xive_cpud = DPCPU_PTR(xive_cpu_data);
	for (;;) {
		ack = xive_read_2(sc, XIVE_TM_SPC_ACK);
		cppr = (ack & 0xff);

		he = ack >> TM_QW3NSR_HE_SHIFT;

		if (he == TM_QW3_NSR_HE_NONE)
			break;

		else if (__predict_false(he != TM_QW3_NSR_HE_PHYS)) {
			/*
			 * We don't support TM_QW3_NSR_HE_POOL or
			 * TM_QW3_NSR_HE_LSI interrupts.
			 */
			device_printf(dev,
			    "Unexpected interrupt he type: %d\n", he);
			goto end;
		}

		xive_write_1(sc, XIVE_TM_CPPR, cppr);

		for (;;) {
			vector = xive_read_eq(&xive_cpud->queue);

			if (vector == 0)
				break;

			if (vector == MAX_XIVE_IRQS)
				vector = xive_ipi_vector;

			powerpc_dispatch_intr(vector, tf);
		}
	}
end:
	xive_write_1(sc, XIVE_TM_CPPR, 0xff);
}

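/*
 * Set up an IRQ for delivery: record the kernel vector as the logical IRQ,
 * route it to the current CPU to start, and unmask it.  The IPI "IRQ" is
 * virtual, so only its vector is recorded.
 */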
static void
xive_enable(device_t dev, u_int irq, u_int vector, void **priv)
{
	struct xive_irq *irqd;
	cell_t status, cpu;

	if (irq == MAX_XIVE_IRQS) {
		if (xive_ipi_vector == -1)
			xive_ipi_vector = vector;
		return;
	}
	if (*priv == NULL)
		*priv = xive_configure_irq(irq);

	irqd = *priv;

	/* Bind to this CPU to start */
	cpu = PCPU_GET(hwref);
	irqd->lirq = vector;

	for (;;) {
		status = opal_call(OPAL_XIVE_SET_IRQ_CONFIG, irq, cpu,
		    XIVE_PRIORITY, vector);
		if (status != OPAL_BUSY)
			break;
		DELAY(10);
	}

	if (status != 0)
		panic("OPAL_SET_XIVE IRQ %d -> cpu %d failed: %d", irq,
		    cpu, status);

	xive_unmask(dev, irq, *priv);
}

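/*
 * Send an EOI.  There are three mechanisms, chosen by the IRQ flags: a store
 * to the "store EOI" offset, a load from the EOI page for LSIs, or a load
 * that resets the PQ bits to 00, followed by a write to the trigger page if
 * an event was coalesced (Q bit set) while the interrupt was pending.
 */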
static void
xive_eoi(device_t dev, u_int irq, void *priv)
{
	struct xive_irq *rirq;
	struct xive_cpu *cpud;
	uint8_t eoi_val;

	if (irq == MAX_XIVE_IRQS) {
		cpud = DPCPU_PTR(xive_cpu_data);
		rirq = &cpud->ipi_data;
	} else
		rirq = priv;

	if (rirq->flags & OPAL_XIVE_IRQ_STORE_EOI)
		xive_write_mmap8(rirq->eoi_page + XIVE_IRQ_STORE_EOI, 0);
	else if (rirq->flags & OPAL_XIVE_IRQ_LSI)
		xive_read_mmap8(rirq->eoi_page + XIVE_IRQ_LOAD_EOI);
	else {
		eoi_val = xive_read_mmap8(rirq->eoi_page + XIVE_IRQ_PQ_00);
		if ((eoi_val & XIVE_IRQ_VAL_Q) && rirq->trig_page != 0)
			xive_write_mmap8(rirq->trig_page, 0);
	}
}

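/* Raise an IPI on the target CPU by writing to its IPI IRQ's trigger page. */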
static void
xive_ipi(device_t dev, u_int cpu)
{
	struct xive_cpu *xive_cpud;

	xive_cpud = DPCPU_ID_PTR(cpu, xive_cpu_data);

	if (xive_cpud->ipi_data.trig_page == 0)
		return;
	xive_write_mmap8(xive_cpud->ipi_data.trig_page, 0);
}

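/*
 * Mask an interrupt by loading from its PQ_01 offset, setting the PQ bits
 * to 01.  Only LSIs are masked at the ESB level; edge sources and IPIs are
 * left untouched.
 */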
static void
xive_mask(device_t dev, u_int irq, void *priv)
{
	struct xive_irq *rirq;

	/* Never mask IPIs */
	if (irq == MAX_XIVE_IRQS)
		return;

	rirq = priv;

	if (!(rirq->flags & OPAL_XIVE_IRQ_LSI))
		return;
	xive_read_mmap8(rirq->eoi_page + XIVE_IRQ_PQ_01);
}

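/* Unmask an interrupt by loading from its PQ_00 offset, resetting PQ to 00. */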
static void
xive_unmask(device_t dev, u_int irq, void *priv)
{
	struct xive_irq *rirq;

	rirq = priv;

	xive_read_mmap8(rirq->eoi_page + XIVE_IRQ_PQ_00);
}

static void
xive_translate_code(device_t dev, u_int irq, int code,
    enum intr_trigger *trig, enum intr_polarity *pol)
{
	switch (code) {
	case 0:
		/* L to H edge */
		*trig = INTR_TRIGGER_EDGE;
		*pol = INTR_POLARITY_HIGH;
		break;
	case 1:
		/* Active L level */
		*trig = INTR_TRIGGER_LEVEL;
		*pol = INTR_POLARITY_LOW;
		break;
	default:
		*trig = INTR_TRIGGER_CONFORM;
		*pol = INTR_POLARITY_CONFORM;
	}
}


/* Private functions. */
/*
 * Set up the current CPU.  Called by the BSP at driver attachment, and by
 * each AP at wakeup (via xive_smp_cpu_startup()).
 */
static void
xive_setup_cpu(void)
{
	struct xive_softc *sc;
	struct xive_cpu *cpup;
	uint32_t val;

	cpup = DPCPU_PTR(xive_cpu_data);

	sc = device_get_softc(root_pic);

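	/*
	 * If this thread already has a pool context enrolled, pull it before
	 * pushing our own CAM line.
	 */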
	val = bus_read_4(sc->sc_mem, XIVE_TM_QW2_HV_POOL + TM_WORD2);
	if (val & TM_QW2W2_VP)
		bus_read_8(sc->sc_mem, XIVE_TM_SPC_PULL_POOL_CTX);

	bus_write_4(sc->sc_mem, XIVE_TM_QW2_HV_POOL + TM_WORD0, 0xff);
	bus_write_4(sc->sc_mem, XIVE_TM_QW2_HV_POOL + TM_WORD2,
	    TM_QW2W2_VP | cpup->cam);

	xive_unmask(root_pic, cpup->ipi_data.girq, &cpup->ipi_data);
	xive_write_1(sc, XIVE_TM_CPPR, 0xff);
}

/* Populate an IRQ structure, mapping the EOI and trigger pages. */
static void
xive_init_irq(struct xive_irq *irqd, u_int irq)
{
	uint64_t eoi_phys, trig_phys;
	uint32_t esb_shift;

	opal_call(OPAL_XIVE_GET_IRQ_INFO, irq,
	    vtophys(&irqd->flags), vtophys(&eoi_phys),
	    vtophys(&trig_phys), vtophys(&esb_shift),
	    vtophys(&irqd->chip));

	irqd->flags = be64toh(irqd->flags);
	eoi_phys = be64toh(eoi_phys);
	trig_phys = be64toh(trig_phys);
	esb_shift = be32toh(esb_shift);
	irqd->chip = be32toh(irqd->chip);

	irqd->girq = irq;
	irqd->esb_size = 1 << esb_shift;
	irqd->eoi_page = (vm_offset_t)pmap_mapdev(eoi_phys, irqd->esb_size);

	if (eoi_phys == trig_phys)
		irqd->trig_page = irqd->eoi_page;
	else if (trig_phys != 0)
		irqd->trig_page = (vm_offset_t)pmap_mapdev(trig_phys,
		    irqd->esb_size);
	else
		irqd->trig_page = 0;

	opal_call(OPAL_XIVE_GET_IRQ_CONFIG, irq, vtophys(&irqd->vp),
	    vtophys(&irqd->prio), vtophys(&irqd->lirq));

	irqd->vp = be64toh(irqd->vp);
	irqd->prio = be64toh(irqd->prio);
	irqd->lirq = be32toh(irqd->lirq);
}

/* Allocate an IRQ struct before populating it. */
static struct xive_irq *
xive_configure_irq(u_int irq)
{
	struct xive_irq *irqd;

	irqd = malloc(sizeof(struct xive_irq), M_XIVE, M_WAITOK);

	xive_init_irq(irqd, irq);

	return (irqd);
}

/*
 * Part of the OPAL API.  OPAL_XIVE_ALLOCATE_VP_BLOCK might require more pages,
 * provisioned through this call.
 */
static int
xive_provision_page(struct xive_softc *sc)
{
	void *prov_page;
	int error;

	do {
		prov_page = contigmalloc(sc->sc_prov_page_size, M_XIVE, 0,
		    0, BUS_SPACE_MAXADDR,
		    sc->sc_prov_page_size, sc->sc_prov_page_size);

		error = opal_call(OPAL_XIVE_DONATE_PAGE, -1,
		    vtophys(prov_page));
	} while (error == OPAL_XIVE_PROVISIONING);

	return (0);
}


/* The XIVE_TM_CPPR register must be set by each thread */
static void
xive_smp_cpu_startup(void)
{

	xive_setup_cpu();
}