FreeBSD/Linux Kernel Cross Reference
sys/sparc64/pci/fire.c
1 /*-
2 * Copyright (c) 1999, 2000 Matthew R. Green
3 * Copyright (c) 2001 - 2003 by Thomas Moestl <tmm@FreeBSD.org>
4 * Copyright (c) 2009 by Marius Strobl <marius@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * from: NetBSD: psycho.c,v 1.39 2001/10/07 20:30:41 eeh Exp
31 * from: FreeBSD: psycho.c 183152 2008-09-18 19:45:22Z marius
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: releng/11.2/sys/sparc64/pci/fire.c 305614 2016-09-08 15:05:25Z pfg $");
36
37 /*
38 * Driver for `Fire' JBus to PCI Express and `Oberon' Uranus to PCI Express
39 * bridges
40 */
41
42 #include "opt_fire.h"
43 #include "opt_ofw_pci.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/bus.h>
48 #include <sys/interrupt.h>
49 #include <sys/kernel.h>
50 #include <sys/lock.h>
51 #include <sys/malloc.h>
52 #include <sys/module.h>
53 #include <sys/mutex.h>
54 #include <sys/pciio.h>
55 #include <sys/pcpu.h>
56 #include <sys/rman.h>
57 #include <sys/smp.h>
58 #include <sys/sysctl.h>
59 #include <sys/timetc.h>
60
61 #include <dev/ofw/ofw_bus.h>
62 #include <dev/ofw/openfirm.h>
63
64 #include <vm/vm.h>
65 #include <vm/pmap.h>
66
67 #include <machine/bus.h>
68 #include <machine/bus_common.h>
69 #include <machine/bus_private.h>
70 #include <machine/iommureg.h>
71 #include <machine/iommuvar.h>
72 #include <machine/resource.h>
73
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76
77 #include <sparc64/pci/ofw_pci.h>
78 #include <sparc64/pci/firereg.h>
79 #include <sparc64/pci/firevar.h>
80
81 #include "pcib_if.h"
82
83 struct fire_msiqarg;
84
85 static const struct fire_desc *fire_get_desc(device_t dev);
86 static void fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
87 bus_dmasync_op_t op);
88 static int fire_get_intrmap(struct fire_softc *sc, u_int ino,
89 bus_addr_t *intrmapptr, bus_addr_t *intrclrptr);
90 static void fire_intr_assign(void *arg);
91 static void fire_intr_clear(void *arg);
92 static void fire_intr_disable(void *arg);
93 static void fire_intr_enable(void *arg);
94 static int fire_intr_register(struct fire_softc *sc, u_int ino);
95 static inline void fire_msiq_common(struct intr_vector *iv,
96 struct fire_msiqarg *fmqa);
97 static void fire_msiq_filter(void *cookie);
98 static void fire_msiq_handler(void *cookie);
99 static void fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
100 driver_filter_t handler, void *arg);
101 static timecounter_get_t fire_get_timecount;
102
103 /* Interrupt handlers */
104 static driver_filter_t fire_dmc_pec;
105 static driver_filter_t fire_pcie;
106 static driver_filter_t fire_xcb;
107
108 /*
109 * Methods
110 */
111 static pcib_alloc_msi_t fire_alloc_msi;
112 static pcib_alloc_msix_t fire_alloc_msix;
113 static bus_alloc_resource_t fire_alloc_resource;
114 static device_attach_t fire_attach;
115 static pcib_map_msi_t fire_map_msi;
116 static pcib_maxslots_t fire_maxslots;
117 static device_probe_t fire_probe;
118 static pcib_read_config_t fire_read_config;
119 static pcib_release_msi_t fire_release_msi;
120 static pcib_release_msix_t fire_release_msix;
121 static pcib_route_interrupt_t fire_route_interrupt;
122 static bus_setup_intr_t fire_setup_intr;
123 static bus_teardown_intr_t fire_teardown_intr;
124 static pcib_write_config_t fire_write_config;
125
126 static device_method_t fire_methods[] = {
127 /* Device interface */
128 DEVMETHOD(device_probe, fire_probe),
129 DEVMETHOD(device_attach, fire_attach),
130 DEVMETHOD(device_shutdown, bus_generic_shutdown),
131 DEVMETHOD(device_suspend, bus_generic_suspend),
132 DEVMETHOD(device_resume, bus_generic_resume),
133
134 /* Bus interface */
135 DEVMETHOD(bus_read_ivar, ofw_pci_read_ivar),
136 DEVMETHOD(bus_setup_intr, fire_setup_intr),
137 DEVMETHOD(bus_teardown_intr, fire_teardown_intr),
138 DEVMETHOD(bus_alloc_resource, fire_alloc_resource),
139 DEVMETHOD(bus_activate_resource, ofw_pci_activate_resource),
140 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
141 DEVMETHOD(bus_adjust_resource, ofw_pci_adjust_resource),
142 DEVMETHOD(bus_release_resource, bus_generic_release_resource),
143 DEVMETHOD(bus_get_dma_tag, ofw_pci_get_dma_tag),
144
145 /* pcib interface */
146 DEVMETHOD(pcib_maxslots, fire_maxslots),
147 DEVMETHOD(pcib_read_config, fire_read_config),
148 DEVMETHOD(pcib_write_config, fire_write_config),
149 DEVMETHOD(pcib_route_interrupt, fire_route_interrupt),
150 DEVMETHOD(pcib_alloc_msi, fire_alloc_msi),
151 DEVMETHOD(pcib_release_msi, fire_release_msi),
152 DEVMETHOD(pcib_alloc_msix, fire_alloc_msix),
153 DEVMETHOD(pcib_release_msix, fire_release_msix),
154 DEVMETHOD(pcib_map_msi, fire_map_msi),
155
156 /* ofw_bus interface */
157 DEVMETHOD(ofw_bus_get_node, ofw_pci_get_node),
158
159 DEVMETHOD_END
160 };
161
162 static devclass_t fire_devclass;
163
164 DEFINE_CLASS_0(pcib, fire_driver, fire_methods, sizeof(struct fire_softc));
165 EARLY_DRIVER_MODULE(fire, nexus, fire_driver, fire_devclass, 0, 0,
166 BUS_PASS_BUS);
167 MODULE_DEPEND(fire, nexus, 1, 1, 1);
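
/*
 * A minimal sketch of the registration pattern above, assuming a
 * hypothetical foo(4) bridge driver (all "foo" names are stand-ins):
 * DEFINE_CLASS_0() ties the method table to the softc size, and
 * EARLY_DRIVER_MODULE() with BUS_PASS_BUS attaches the bridge in the
 * bus pass, before ordinary device drivers probe.
 */
#if 0
static device_method_t foo_methods[] = {
	DEVMETHOD(device_probe,		foo_probe),
	DEVMETHOD(device_attach,	foo_attach),
	DEVMETHOD_END
};

static devclass_t foo_devclass;

DEFINE_CLASS_0(foo, foo_driver, foo_methods, sizeof(struct foo_softc));
EARLY_DRIVER_MODULE(foo, nexus, foo_driver, foo_devclass, 0, 0,
    BUS_PASS_BUS);
MODULE_DEPEND(foo, nexus, 1, 1, 1);
#endif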
168
169 static const struct intr_controller fire_ic = {
170 fire_intr_enable,
171 fire_intr_disable,
172 fire_intr_assign,
173 fire_intr_clear
174 };
175
176 struct fire_icarg {
177 struct fire_softc *fica_sc;
178 bus_addr_t fica_map;
179 bus_addr_t fica_clr;
180 };
181
182 static const struct intr_controller fire_msiqc_filter = {
183 fire_intr_enable,
184 fire_intr_disable,
185 fire_intr_assign,
186 NULL
187 };
188
189 struct fire_msiqarg {
190 struct fire_icarg fmqa_fica;
191 struct mtx fmqa_mtx;
192 struct fo_msiq_record *fmqa_base;
193 uint64_t fmqa_head;
194 uint64_t fmqa_tail;
195 uint32_t fmqa_msiq;
196 uint32_t fmqa_msi;
197 };
198
199 #define FIRE_PERF_CNT_QLTY 100
200
201 #define FIRE_SPC_BARRIER(spc, sc, offs, len, flags) \
202 bus_barrier((sc)->sc_mem_res[(spc)], (offs), (len), (flags))
203 #define FIRE_SPC_READ_8(spc, sc, offs) \
204 bus_read_8((sc)->sc_mem_res[(spc)], (offs))
205 #define FIRE_SPC_WRITE_8(spc, sc, offs, v) \
206 bus_write_8((sc)->sc_mem_res[(spc)], (offs), (v))
207
208 #ifndef FIRE_DEBUG
209 #define FIRE_SPC_SET(spc, sc, offs, reg, v) \
210 FIRE_SPC_WRITE_8((spc), (sc), (offs), (v))
211 #else
212 #define FIRE_SPC_SET(spc, sc, offs, reg, v) do { \
213 device_printf((sc)->sc_dev, reg " 0x%016llx -> 0x%016llx\n", \
214 (unsigned long long)FIRE_SPC_READ_8((spc), (sc), (offs)), \
215 (unsigned long long)(v)); \
216 FIRE_SPC_WRITE_8((spc), (sc), (offs), (v)); \
217 } while (0)
218 #endif
219
220 #define FIRE_PCI_BARRIER(sc, offs, len, flags) \
221 FIRE_SPC_BARRIER(FIRE_PCI, (sc), (offs), len, flags)
222 #define FIRE_PCI_READ_8(sc, offs) \
223 FIRE_SPC_READ_8(FIRE_PCI, (sc), (offs))
224 #define FIRE_PCI_WRITE_8(sc, offs, v) \
225 FIRE_SPC_WRITE_8(FIRE_PCI, (sc), (offs), (v))
226 #define FIRE_CTRL_BARRIER(sc, offs, len, flags) \
227 FIRE_SPC_BARRIER(FIRE_CTRL, (sc), (offs), len, flags)
228 #define FIRE_CTRL_READ_8(sc, offs) \
229 FIRE_SPC_READ_8(FIRE_CTRL, (sc), (offs))
230 #define FIRE_CTRL_WRITE_8(sc, offs, v) \
231 FIRE_SPC_WRITE_8(FIRE_CTRL, (sc), (offs), (v))
232
233 #define FIRE_PCI_SET(sc, offs, v) \
234 FIRE_SPC_SET(FIRE_PCI, (sc), (offs), # offs, (v))
235 #define FIRE_CTRL_SET(sc, offs, v) \
236 FIRE_SPC_SET(FIRE_CTRL, (sc), (offs), # offs, (v))
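
/*
 * A minimal sketch of what the debug flavor of these macros expands to:
 * the `# offs' stringification turns the register-offset macro name at
 * the call site into the printed label, so a single FIRE_*_SET() call
 * yields an old -> new trace.  For the call FIRE_PCI_SET(sc,
 * FO_PCI_TLU_DEV_CTRL, 0) made below, the expansion is roughly:
 */
#if 0
	device_printf(sc->sc_dev,
	    "FO_PCI_TLU_DEV_CTRL 0x%016llx -> 0x%016llx\n",
	    (unsigned long long)FIRE_PCI_READ_8(sc, FO_PCI_TLU_DEV_CTRL),
	    (unsigned long long)0);
	FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_DEV_CTRL, 0);
#endif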
237
238 struct fire_desc {
239 const char *fd_string;
240 int fd_mode;
241 const char *fd_name;
242 };
243
244 static const struct fire_desc fire_compats[] = {
245 { "pciex108e,80f0", FIRE_MODE_FIRE, "Fire" },
246 #if 0
247 { "pciex108e,80f8", FIRE_MODE_OBERON, "Oberon" },
248 #endif
249 { NULL, 0, NULL }
250 };
251
252 static const struct fire_desc *
253 fire_get_desc(device_t dev)
254 {
255 const struct fire_desc *desc;
256 const char *compat;
257
258 compat = ofw_bus_get_compat(dev);
259 if (compat == NULL)
260 return (NULL);
261 for (desc = fire_compats; desc->fd_string != NULL; desc++)
262 if (strcmp(desc->fd_string, compat) == 0)
263 return (desc);
264 return (NULL);
265 }
266
267 static int
268 fire_probe(device_t dev)
269 {
270 const char *dtype;
271
272 dtype = ofw_bus_get_type(dev);
273 if (dtype != NULL && strcmp(dtype, OFW_TYPE_PCIE) == 0 &&
274 fire_get_desc(dev) != NULL) {
275 device_set_desc(dev, "Sun Host-PCIe bridge");
276 return (BUS_PROBE_GENERIC);
277 }
278 return (ENXIO);
279 }
280
281 static int
282 fire_attach(device_t dev)
283 {
284 struct fire_softc *sc;
285 const struct fire_desc *desc;
286 struct ofw_pci_msi_ranges msi_ranges;
287 struct ofw_pci_msi_addr_ranges msi_addr_ranges;
288 struct ofw_pci_msi_eq_to_devino msi_eq_to_devino;
289 struct fire_msiqarg *fmqa;
290 struct timecounter *tc;
291 bus_dma_tag_t dmat;
292 uint64_t ino_bitmap, val;
293 phandle_t node;
294 uint32_t prop, prop_array[2];
295 int i, j, mode;
296 u_int lw;
297 uint16_t mps;
298
299 sc = device_get_softc(dev);
300 node = ofw_bus_get_node(dev);
301 desc = fire_get_desc(dev);
302 mode = desc->fd_mode;
303
304 sc->sc_dev = dev;
305 sc->sc_mode = mode;
306 sc->sc_flags = 0;
307
308 mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF);
309 mtx_init(&sc->sc_pcib_mtx, "pcib_mtx", NULL, MTX_SPIN);
310
311 /*
312 * Fire and Oberon have two register banks:
313 * (0) per-PBM PCI Express configuration and status registers
314 * (1) (shared) Fire/Oberon controller configuration and status
315 * registers
316 */
317 for (i = 0; i < FIRE_NREG; i++) {
318 j = i;
319 sc->sc_mem_res[i] = bus_alloc_resource_any(dev,
320 SYS_RES_MEMORY, &j, RF_ACTIVE);
321 if (sc->sc_mem_res[i] == NULL)
322 panic("%s: could not allocate register bank %d",
323 __func__, i);
324 }
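
	/*
	 * A short sketch of the rid handling above: the rid argument is
	 * in/out and the resource code may rewrite it, so the loop copies
	 * the bank index into a scratch rid instead of passing &i directly.
	 */
#if 0
	int rid = 0;			/* scratch rid; may be modified */
	struct resource *res;

	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		panic("could not allocate register bank");
#endif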
325
326 if (OF_getprop(node, "portid", &sc->sc_ign, sizeof(sc->sc_ign)) == -1)
327 panic("%s: could not determine IGN", __func__);
328 if (OF_getprop(node, "module-revision#", &prop, sizeof(prop)) == -1)
329 panic("%s: could not determine module-revision", __func__);
330
331 device_printf(dev, "%s, module-revision %d, IGN %#x\n",
332 desc->fd_name, prop, sc->sc_ign);
333
334 /*
335 * Hunt through all the interrupt mapping regs and register
336 * the interrupt controller for our interrupt vectors. We do
337 * this early in order to be able to catch stray interrupts.
338 */
339 i = OF_getprop(node, "ino-bitmap", (void *)prop_array,
340 sizeof(prop_array));
341 if (i == -1)
342 panic("%s: could not get ino-bitmap", __func__);
343 ino_bitmap = ((uint64_t)prop_array[1] << 32) | prop_array[0];
344 for (i = 0; i <= FO_MAX_INO; i++) {
345 if ((ino_bitmap & (1ULL << i)) == 0)
346 continue;
347 j = fire_intr_register(sc, i);
348 if (j != 0)
349 device_printf(dev, "could not register interrupt "
350 "controller for INO %d (%d)\n", i, j);
351 }
352
353 /* JBC/UBC module initialization */
354 FIRE_CTRL_SET(sc, FO_XBC_ERR_LOG_EN, ~0ULL);
355 FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
356 /* not enabled by OpenSolaris */
357 FIRE_CTRL_SET(sc, FO_XBC_INT_EN, ~0ULL);
358 if (sc->sc_mode == FIRE_MODE_FIRE) {
359 FIRE_CTRL_SET(sc, FIRE_JBUS_PAR_CTRL,
360 FIRE_JBUS_PAR_CTRL_P_EN);
361 FIRE_CTRL_SET(sc, FIRE_JBC_FATAL_RST_EN,
362 ((1ULL << FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_SHFT) &
363 FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_MASK) |
364 FIRE_JBC_FATAL_RST_EN_MB_PEA_P_INT |
365 FIRE_JBC_FATAL_RST_EN_CPE_P_INT |
366 FIRE_JBC_FATAL_RST_EN_APE_P_INT |
367 FIRE_JBC_FATAL_RST_EN_PIO_CPE_INT |
368 FIRE_JBC_FATAL_RST_EN_JTCEEW_P_INT |
369 FIRE_JBC_FATAL_RST_EN_JTCEEI_P_INT |
370 FIRE_JBC_FATAL_RST_EN_JTCEER_P_INT);
371 FIRE_CTRL_SET(sc, FIRE_JBC_CORE_BLOCK_INT_EN, ~0ULL);
372 }
373
374 /* TLU initialization */
375 FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_STAT_CLR,
376 FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
377 /* not enabled by OpenSolaris */
378 FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_INT_EN,
379 FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
380 FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_STAT_CLR,
381 FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
382 /* not enabled by OpenSolaris */
383 FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_INT_EN,
384 FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
385 FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_STAT_CLR,
386 FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
387 /* not enabled by OpenSolaris */
388 FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_INT_EN,
389 FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
390 val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) |
391 ((FO_PCI_TLU_CTRL_L0S_TIM_DFLT << FO_PCI_TLU_CTRL_L0S_TIM_SHFT) &
392 FO_PCI_TLU_CTRL_L0S_TIM_MASK) |
393 ((FO_PCI_TLU_CTRL_CFG_DFLT << FO_PCI_TLU_CTRL_CFG_SHFT) &
394 FO_PCI_TLU_CTRL_CFG_MASK);
395 if (sc->sc_mode == FIRE_MODE_OBERON)
396 val &= ~FO_PCI_TLU_CTRL_NWPR_EN;
397 val |= FO_PCI_TLU_CTRL_CFG_REMAIN_DETECT_QUIET;
398 FIRE_PCI_SET(sc, FO_PCI_TLU_CTRL, val);
399 FIRE_PCI_SET(sc, FO_PCI_TLU_DEV_CTRL, 0);
400 FIRE_PCI_SET(sc, FO_PCI_TLU_LNK_CTRL, FO_PCI_TLU_LNK_CTRL_CLK);
401
402 /* DLU/LPU initialization */
403 if (sc->sc_mode == FIRE_MODE_OBERON)
404 FIRE_PCI_SET(sc, FO_PCI_LPU_INT_MASK, 0);
405 else
406 FIRE_PCI_SET(sc, FO_PCI_LPU_RST, 0);
407 FIRE_PCI_SET(sc, FO_PCI_LPU_LNK_LYR_CFG,
408 FO_PCI_LPU_LNK_LYR_CFG_VC0_EN);
409 FIRE_PCI_SET(sc, FO_PCI_LPU_FLW_CTRL_UPDT_CTRL,
410 FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_NP_EN |
411 FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_P_EN);
412 if (sc->sc_mode == FIRE_MODE_OBERON)
413 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
414 (OBERON_PCI_LPU_TXLNK_RPLY_TMR_THRS_DFLT <<
415 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
416 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
417 else {
418 switch ((FIRE_PCI_READ_8(sc, FO_PCI_TLU_LNK_STAT) &
419 FO_PCI_TLU_LNK_STAT_WDTH_MASK) >>
420 FO_PCI_TLU_LNK_STAT_WDTH_SHFT) {
421 case 1:
422 lw = 0;
423 break;
424 case 4:
425 lw = 1;
426 break;
427 case 8:
428 lw = 2;
429 break;
430 case 16:
431 lw = 3;
432 break;
433 default:
434 lw = 0;
435 }
436 mps = (FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) &
437 FO_PCI_TLU_CTRL_CFG_MPS_MASK) >>
438 FO_PCI_TLU_CTRL_CFG_MPS_SHFT;
439 i = sizeof(fire_freq_nak_tmr_thrs) /
440 sizeof(*fire_freq_nak_tmr_thrs);
441 if (mps >= i)
442 mps = i - 1;
443 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS,
444 (fire_freq_nak_tmr_thrs[mps][lw] <<
445 FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_SHFT) &
446 FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_MASK);
447 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
448 (fire_rply_tmr_thrs[mps][lw] <<
449 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
450 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
451 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RTR_FIFO_PTR,
452 ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_DFLT <<
453 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_SHFT) &
454 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_MASK) |
455 ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_DFLT <<
456 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_SHFT) &
457 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_MASK));
458 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG2,
459 (FO_PCI_LPU_LTSSM_CFG2_12_TO_DFLT <<
460 FO_PCI_LPU_LTSSM_CFG2_12_TO_SHFT) &
461 FO_PCI_LPU_LTSSM_CFG2_12_TO_MASK);
462 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG3,
463 (FO_PCI_LPU_LTSSM_CFG3_2_TO_DFLT <<
464 FO_PCI_LPU_LTSSM_CFG3_2_TO_SHFT) &
465 FO_PCI_LPU_LTSSM_CFG3_2_TO_MASK);
466 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG4,
467 ((FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_DFLT <<
468 FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_SHFT) &
469 FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_MASK) |
470 ((FO_PCI_LPU_LTSSM_CFG4_N_FTS_DFLT <<
471 FO_PCI_LPU_LTSSM_CFG4_N_FTS_SHFT) &
472 FO_PCI_LPU_LTSSM_CFG4_N_FTS_MASK));
473 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG5, 0);
474 }
475
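	/*
	 * A minimal sketch of the table clamp idiom used for the Fire
	 * timer thresholds above; the table contents here are illustrative
	 * stand-ins, not the real fire_freq_nak_tmr_thrs/fire_rply_tmr_thrs
	 * values.
	 */
#if 0
	static const uint16_t thrs[4][4] = {	/* [MPS index][link width] */
		{ 0x64, 0x3a, 0x26, 0x1c },
		{ 0xa0, 0x50, 0x2e, 0x20 },
		{ 0x11c, 0x74, 0x38, 0x24 },
		{ 0x1f4, 0xc8, 0x4c, 0x28 },
	};
	u_int rows = sizeof(thrs) / sizeof(*thrs);

	if (mps >= rows)	/* clamp out-of-range MPS to the last row */
		mps = rows - 1;
	val = thrs[mps][lw];
#endif
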
476 /* ILU initialization */
477 FIRE_PCI_SET(sc, FO_PCI_ILU_ERR_STAT_CLR, ~0ULL);
478 /* not enabled by OpenSolaris */
479 FIRE_PCI_SET(sc, FO_PCI_ILU_INT_EN, ~0ULL);
480
481 /* IMU initialization */
482 FIRE_PCI_SET(sc, FO_PCI_IMU_ERR_STAT_CLR, ~0ULL);
483 FIRE_PCI_SET(sc, FO_PCI_IMU_INT_EN,
484 FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_EN) &
485 ~(FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_S |
486 FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_S |
487 FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_S |
488 FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
489 FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
490 FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P));
491
492 /* MMU initialization */
493 FIRE_PCI_SET(sc, FO_PCI_MMU_ERR_STAT_CLR,
494 FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
495 /* not enabled by OpenSolaris */
496 FIRE_PCI_SET(sc, FO_PCI_MMU_INT_EN,
497 FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
498
499 /* DMC initialization */
500 FIRE_PCI_SET(sc, FO_PCI_DMC_CORE_BLOCK_INT_EN, ~0ULL);
501 FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTA, 0);
502 FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTB, 0);
503
504 /* PEC initialization */
505 FIRE_PCI_SET(sc, FO_PCI_PEC_CORE_BLOCK_INT_EN, ~0ULL);
506
507 /* Establish handlers for interesting interrupts. */
508 if ((ino_bitmap & (1ULL << FO_DMC_PEC_INO)) != 0)
509 fire_set_intr(sc, 1, FO_DMC_PEC_INO, fire_dmc_pec, sc);
510 if ((ino_bitmap & (1ULL << FO_XCB_INO)) != 0)
511 fire_set_intr(sc, 0, FO_XCB_INO, fire_xcb, sc);
512
513 /* MSI/MSI-X support */
514 if (OF_getprop(node, "#msi", &sc->sc_msi_count,
515 sizeof(sc->sc_msi_count)) == -1)
516 panic("%s: could not determine MSI count", __func__);
517 if (OF_getprop(node, "msi-ranges", &msi_ranges,
518 sizeof(msi_ranges)) == -1)
519 sc->sc_msi_first = 0;
520 else
521 sc->sc_msi_first = msi_ranges.first;
522 if (OF_getprop(node, "msi-data-mask", &sc->sc_msi_data_mask,
523 sizeof(sc->sc_msi_data_mask)) == -1)
524 panic("%s: could not determine MSI data mask", __func__);
525 if (OF_getprop(node, "msix-data-width", &sc->sc_msix_data_width,
526 sizeof(sc->sc_msix_data_width)) > 0)
527 sc->sc_flags |= FIRE_MSIX;
528 if (OF_getprop(node, "msi-address-ranges", &msi_addr_ranges,
529 sizeof(msi_addr_ranges)) == -1)
530 panic("%s: could not determine MSI address ranges", __func__);
531 sc->sc_msi_addr32 = OFW_PCI_MSI_ADDR_RANGE_32(&msi_addr_ranges);
532 sc->sc_msi_addr64 = OFW_PCI_MSI_ADDR_RANGE_64(&msi_addr_ranges);
533 if (OF_getprop(node, "#msi-eqs", &sc->sc_msiq_count,
534 sizeof(sc->sc_msiq_count)) == -1)
535 panic("%s: could not determine MSI event queue count",
536 __func__);
537 if (OF_getprop(node, "msi-eq-size", &sc->sc_msiq_size,
538 sizeof(sc->sc_msiq_size)) == -1)
539 panic("%s: could not determine MSI event queue size",
540 __func__);
541 if (OF_getprop(node, "msi-eq-to-devino", &msi_eq_to_devino,
542 sizeof(msi_eq_to_devino)) == -1 &&
543 OF_getprop(node, "msi-eq-devino", &msi_eq_to_devino,
544 sizeof(msi_eq_to_devino)) == -1) {
545 sc->sc_msiq_first = 0;
546 sc->sc_msiq_ino_first = FO_EQ_FIRST_INO;
547 } else {
548 sc->sc_msiq_first = msi_eq_to_devino.eq_first;
549 sc->sc_msiq_ino_first = msi_eq_to_devino.devino_first;
550 }
551 if (sc->sc_msiq_ino_first < FO_EQ_FIRST_INO ||
552 sc->sc_msiq_ino_first + sc->sc_msiq_count - 1 > FO_EQ_LAST_INO)
553 panic("%s: event queues exceed INO range", __func__);
554 sc->sc_msi_bitmap = malloc(roundup2(sc->sc_msi_count, NBBY) / NBBY,
555 M_DEVBUF, M_NOWAIT | M_ZERO);
556 if (sc->sc_msi_bitmap == NULL)
557 panic("%s: could not malloc MSI bitmap", __func__);
558 sc->sc_msi_msiq_table = malloc(sc->sc_msi_count *
559 sizeof(*sc->sc_msi_msiq_table), M_DEVBUF, M_NOWAIT | M_ZERO);
560 if (sc->sc_msi_msiq_table == NULL)
561 panic("%s: could not malloc MSI-MSI event queue table",
562 __func__);
563 sc->sc_msiq_bitmap = malloc(roundup2(sc->sc_msiq_count, NBBY) / NBBY,
564 M_DEVBUF, M_NOWAIT | M_ZERO);
565 if (sc->sc_msiq_bitmap == NULL)
566 panic("%s: could not malloc MSI event queue bitmap", __func__);
567 j = FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * sc->sc_msiq_count;
568 sc->sc_msiq = contigmalloc(j, M_DEVBUF, M_NOWAIT, 0, ~0UL,
569 FO_EQ_ALIGNMENT, 0);
570 if (sc->sc_msiq == NULL)
571 panic("%s: could not contigmalloc MSI event queue", __func__);
572 memset(sc->sc_msiq, 0, j);
573 FIRE_PCI_SET(sc, FO_PCI_EQ_BASE_ADDR, FO_PCI_EQ_BASE_ADDR_BYPASS |
574 (pmap_kextract((vm_offset_t)sc->sc_msiq) &
575 FO_PCI_EQ_BASE_ADDR_MASK));
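	/*
	 * A minimal sketch of the ring setup just performed: the event
	 * queues must be physically contiguous and FO_EQ_ALIGNMENT-aligned,
	 * so they come from contigmalloc(9), and the bridge is handed the
	 * physical address via pmap_kextract().  "nqueues" is a stand-in.
	 */
#if 0
	size_t size = FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * nqueues;
	void *ring;
	vm_paddr_t pa;

	ring = contigmalloc(size, M_DEVBUF, M_NOWAIT | M_ZERO,
	    0,			/* lowest acceptable physical address */
	    ~0UL,		/* highest acceptable physical address */
	    FO_EQ_ALIGNMENT,	/* alignment */
	    0);			/* no boundary restriction */
	if (ring == NULL)
		panic("could not allocate event queues");
	pa = pmap_kextract((vm_offset_t)ring);	/* what the device sees */
#endif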
576 for (i = 0; i < sc->sc_msi_count; i++) {
577 j = (i + sc->sc_msi_first) << 3;
578 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + j,
579 FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + j) &
580 ~FO_PCI_MSI_MAP_V);
581 }
582 for (i = 0; i < sc->sc_msiq_count; i++) {
583 j = i + sc->sc_msiq_ino_first;
584 if ((ino_bitmap & (1ULL << j)) == 0) {
585 mtx_lock(&sc->sc_msi_mtx);
586 setbit(sc->sc_msiq_bitmap, i);
587 mtx_unlock(&sc->sc_msi_mtx);
588 }
589 fmqa = intr_vectors[INTMAP_VEC(sc->sc_ign, j)].iv_icarg;
590 mtx_init(&fmqa->fmqa_mtx, "msiq_mtx", NULL, MTX_SPIN);
591 fmqa->fmqa_base =
592 (struct fo_msiq_record *)((caddr_t)sc->sc_msiq +
593 (FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * i));
594 j = i + sc->sc_msiq_first;
595 fmqa->fmqa_msiq = j;
596 j <<= 3;
597 fmqa->fmqa_head = FO_PCI_EQ_HD_BASE + j;
598 fmqa->fmqa_tail = FO_PCI_EQ_TL_BASE + j;
599 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + j,
600 FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
601 FO_PCI_EQ_CTRL_CLR_DIS);
602 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_tail,
603 (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
604 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head,
605 (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
606 }
607 FIRE_PCI_SET(sc, FO_PCI_MSI_32_BIT_ADDR, sc->sc_msi_addr32 &
608 FO_PCI_MSI_32_BIT_ADDR_MASK);
609 FIRE_PCI_SET(sc, FO_PCI_MSI_64_BIT_ADDR, sc->sc_msi_addr64 &
610 FO_PCI_MSI_64_BIT_ADDR_MASK);
611
612 /*
613 * Establish a handler for interesting PCIe messages and disable
614 * uninteresting ones.
615 */
616 mtx_lock(&sc->sc_msi_mtx);
617 for (i = 0; i < sc->sc_msiq_count; i++) {
618 if (isclr(sc->sc_msiq_bitmap, i) != 0) {
619 j = i;
620 break;
621 }
622 }
623 if (i == sc->sc_msiq_count) {
624 mtx_unlock(&sc->sc_msi_mtx);
625 panic("%s: no spare event queue for PCIe messages", __func__);
626 }
627 setbit(sc->sc_msiq_bitmap, j);
628 mtx_unlock(&sc->sc_msi_mtx);
629 i = INTMAP_VEC(sc->sc_ign, j + sc->sc_msiq_ino_first);
630 if (bus_set_resource(dev, SYS_RES_IRQ, 2, i, 1) != 0)
631 panic("%s: failed to add interrupt for PCIe messages",
632 __func__);
633 fire_set_intr(sc, 2, INTINO(i), fire_pcie, intr_vectors[i].iv_icarg);
634 j += sc->sc_msiq_first;
635 /*
636 * "Please note that setting the EQNUM field to a value larger than
637 * 35 will yield unpredictable results."
638 */
639 if (j > 35)
640 panic("%s: invalid queue for PCIe messages (%d)",
641 __func__, j);
642 FIRE_PCI_SET(sc, FO_PCI_ERR_COR, FO_PCI_ERR_PME_V |
643 ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
644 FIRE_PCI_SET(sc, FO_PCI_ERR_NONFATAL, FO_PCI_ERR_PME_V |
645 ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
646 FIRE_PCI_SET(sc, FO_PCI_ERR_FATAL, FO_PCI_ERR_PME_V |
647 ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
648 FIRE_PCI_SET(sc, FO_PCI_PM_PME, 0);
649 FIRE_PCI_SET(sc, FO_PCI_PME_TO_ACK, 0);
650 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (j << 3),
651 FO_PCI_EQ_CTRL_SET_EN);
652
653 #define TC_COUNTER_MAX_MASK 0xffffffff
654
655 /*
656 * Set up JBC/UBC performance counter 0 in bus cycle counting
657 * mode as the timecounter.
658 */
659 if (device_get_unit(dev) == 0) {
660 FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT0, 0);
661 FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT1, 0);
662 FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT_SEL,
663 (FO_XBC_PRF_CNT_NONE << FO_XBC_PRF_CNT_CNT1_SHFT) |
664 (FO_XBC_PRF_CNT_XB_CLK << FO_XBC_PRF_CNT_CNT0_SHFT));
665 tc = malloc(sizeof(*tc), M_DEVBUF, M_NOWAIT | M_ZERO);
666 if (tc == NULL)
667 panic("%s: could not malloc timecounter", __func__);
668 tc->tc_get_timecount = fire_get_timecount;
669 tc->tc_counter_mask = TC_COUNTER_MAX_MASK;
670 if (OF_getprop(OF_peer(0), "clock-frequency", &prop,
671 sizeof(prop)) == -1)
672 panic("%s: could not determine clock frequency",
673 __func__);
674 tc->tc_frequency = prop;
675 tc->tc_name = strdup(device_get_nameunit(dev), M_DEVBUF);
676 tc->tc_priv = sc;
677 /*
678 * Due to initial problems with the JBus-driven performance
679 * counters not advancing, which might be firmware-dependent,
680 * ensure that it actually works.
681 */
682 if (fire_get_timecount(tc) - fire_get_timecount(tc) != 0)
683 tc->tc_quality = FIRE_PERF_CNT_QLTY;
684 else
685 tc->tc_quality = -FIRE_PERF_CNT_QLTY;
686 tc_init(tc);
687 }
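	/*
	 * A note on the quality check above, restated as a minimal sketch:
	 * two back-to-back reads of a working free-running counter differ,
	 * so a zero difference registers the timecounter with a negative
	 * quality, which the timecounter code treats as "use only on
	 * explicit request" via kern.timecounter.hardware.
	 */
#if 0
	u_int a = fire_get_timecount(tc);
	u_int b = fire_get_timecount(tc);

	tc->tc_quality = (b - a != 0) ?
	    FIRE_PERF_CNT_QLTY : -FIRE_PERF_CNT_QLTY;
	tc_init(tc);
#endif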
688
689 /*
690 * Set up the IOMMU. Both Fire and Oberon have one per PBM, but
691 * neither has a streaming buffer.
692 */
693 memcpy(&sc->sc_dma_methods, &iommu_dma_methods,
694 sizeof(sc->sc_dma_methods));
695 sc->sc_is.is_flags = IOMMU_FIRE | IOMMU_PRESERVE_PROM;
696 if (sc->sc_mode == FIRE_MODE_OBERON) {
697 sc->sc_is.is_flags |= IOMMU_FLUSH_CACHE;
698 sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(OBERON_IOMMU_BITS);
699 } else {
700 sc->sc_dma_methods.dm_dmamap_sync = fire_dmamap_sync;
701 sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(FIRE_IOMMU_BITS);
702 }
703 sc->sc_is.is_sb[0] = sc->sc_is.is_sb[1] = 0;
704 /* Punch in our copies. */
705 sc->sc_is.is_bustag = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]);
706 sc->sc_is.is_bushandle = rman_get_bushandle(sc->sc_mem_res[FIRE_PCI]);
707 sc->sc_is.is_iommu = FO_PCI_MMU;
708 val = FIRE_PCI_READ_8(sc, FO_PCI_MMU + IMR_CTL);
709 iommu_init(device_get_nameunit(dev), &sc->sc_is, 7, -1, 0);
710 #ifdef FIRE_DEBUG
711 device_printf(dev, "FO_PCI_MMU + IMR_CTL 0x%016llx -> 0x%016llx\n",
712 (long long unsigned)val, (long long unsigned)sc->sc_is.is_cr);
713 #endif
714 /* Create our DMA tag. */
715 if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0x100000000,
716 sc->sc_is.is_pmaxaddr, ~0, NULL, NULL, sc->sc_is.is_pmaxaddr,
717 0xff, 0xffffffff, 0, NULL, NULL, &dmat) != 0)
718 panic("%s: could not create PCI DMA tag", __func__);
719 dmat->dt_cookie = &sc->sc_is;
720 dmat->dt_mt = &sc->sc_dma_methods;
721
722 if (ofw_pci_attach_common(dev, dmat, FO_IO_SIZE, FO_MEM_SIZE) != 0)
723 panic("%s: ofw_pci_attach_common() failed", __func__);
724
725 #define FIRE_SYSCTL_ADD_UINT(name, arg, desc) \
726 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), \
727 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, \
728 (name), CTLFLAG_RD, (arg), 0, (desc))
729
730 FIRE_SYSCTL_ADD_UINT("ilu_err", &sc->sc_stats_ilu_err,
731 "ILU unknown errors");
732 FIRE_SYSCTL_ADD_UINT("jbc_ce_async", &sc->sc_stats_jbc_ce_async,
733 "JBC correctable errors");
734 FIRE_SYSCTL_ADD_UINT("jbc_unsol_int", &sc->sc_stats_jbc_unsol_int,
735 "JBC unsolicited interrupt ACK/NACK errors");
736 FIRE_SYSCTL_ADD_UINT("jbc_unsol_rd", &sc->sc_stats_jbc_unsol_rd,
737 "JBC unsolicited read response errors");
738 FIRE_SYSCTL_ADD_UINT("mmu_err", &sc->sc_stats_mmu_err, "MMU errors");
739 FIRE_SYSCTL_ADD_UINT("tlu_ce", &sc->sc_stats_tlu_ce,
740 "DLU/TLU correctable errors");
741 FIRE_SYSCTL_ADD_UINT("tlu_oe_non_fatal",
742 &sc->sc_stats_tlu_oe_non_fatal,
743 "DLU/TLU other event non-fatal errors summary");
744 FIRE_SYSCTL_ADD_UINT("tlu_oe_rx_err", &sc->sc_stats_tlu_oe_rx_err,
745 "DLU/TLU receive other event errors");
746 FIRE_SYSCTL_ADD_UINT("tlu_oe_tx_err", &sc->sc_stats_tlu_oe_tx_err,
747 "DLU/TLU transmit other event errors");
748 FIRE_SYSCTL_ADD_UINT("ubc_dmardue", &sc->sc_stats_ubc_dmardue,
749 "UBC DMARDUE erros");
750
751 #undef FIRE_SYSCTL_ADD_UINT
752
753 device_add_child(dev, "pci", -1);
754 return (bus_generic_attach(dev));
755 }
756
757 static void
758 fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
759 driver_filter_t handler, void *arg)
760 {
761 u_long vec;
762 int rid;
763
764 rid = index;
765 sc->sc_irq_res[index] = bus_alloc_resource_any(sc->sc_dev,
766 SYS_RES_IRQ, &rid, RF_ACTIVE);
767 if (sc->sc_irq_res[index] == NULL ||
768 INTINO(vec = rman_get_start(sc->sc_irq_res[index])) != ino ||
769 INTIGN(vec) != sc->sc_ign ||
770 intr_vectors[vec].iv_ic != &fire_ic ||
771 bus_setup_intr(sc->sc_dev, sc->sc_irq_res[index],
772 INTR_TYPE_MISC | INTR_BRIDGE, handler, NULL, arg,
773 &sc->sc_ihand[index]) != 0)
774 panic("%s: failed to set up interrupt %d", __func__, index);
775 }
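
/*
 * A minimal sketch of the vector plumbing verified above: a system-wide
 * interrupt vector combines the bridge's interrupt group number (IGN,
 * from the "portid" property) with the per-bridge INO, and INTINO()/
 * INTIGN() recover the halves.  The IGN value below is made up.
 */
#if 0
	u_int ign = 0x1f;
	u_long vec = INTMAP_VEC(ign, FO_XCB_INO);

	KASSERT(INTINO(vec) == FO_XCB_INO && INTIGN(vec) == ign,
	    ("vector encoding must round-trip"));
#endif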
776
777 static int
778 fire_intr_register(struct fire_softc *sc, u_int ino)
779 {
780 struct fire_icarg *fica;
781 bus_addr_t intrclr, intrmap;
782 int error;
783
784 if (fire_get_intrmap(sc, ino, &intrmap, &intrclr) == 0)
785 return (ENXIO);
786 fica = malloc((ino >= FO_EQ_FIRST_INO && ino <= FO_EQ_LAST_INO) ?
787 sizeof(struct fire_msiqarg) : sizeof(struct fire_icarg), M_DEVBUF,
788 M_NOWAIT | M_ZERO);
789 if (fica == NULL)
790 return (ENOMEM);
791 fica->fica_sc = sc;
792 fica->fica_map = intrmap;
793 fica->fica_clr = intrclr;
794 error = (intr_controller_register(INTMAP_VEC(sc->sc_ign, ino),
795 &fire_ic, fica));
796 if (error != 0)
797 free(fica, M_DEVBUF);
798 return (error);
799 }
800
801 static int
802 fire_get_intrmap(struct fire_softc *sc, u_int ino, bus_addr_t *intrmapptr,
803 bus_addr_t *intrclrptr)
804 {
805
806 if (ino > FO_MAX_INO) {
807 device_printf(sc->sc_dev, "out of range INO %d requested\n",
808 ino);
809 return (0);
810 }
811
812 ino <<= 3;
813 if (intrmapptr != NULL)
814 *intrmapptr = FO_PCI_INT_MAP_BASE + ino;
815 if (intrclrptr != NULL)
816 *intrclrptr = FO_PCI_INT_CLR_BASE + ino;
817 return (1);
818 }
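
/*
 * A minimal sketch of the offset arithmetic above: the mapping and
 * clearing registers are 8 bytes wide and laid out as per-INO arrays,
 * so shifting the INO left by 3 turns it into a byte offset.
 */
#if 0
	bus_addr_t map = FO_PCI_INT_MAP_BASE + ((bus_addr_t)ino << 3);
	bus_addr_t clr = FO_PCI_INT_CLR_BASE + ((bus_addr_t)ino << 3);
#endif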
819
820 /*
821 * Interrupt handlers
822 */
823 static int
824 fire_dmc_pec(void *arg)
825 {
826 struct fire_softc *sc;
827 device_t dev;
828 uint64_t cestat, dmcstat, ilustat, imustat, mcstat, mmustat, mmutfar;
829 uint64_t mmutfsr, oestat, pecstat, uestat, val;
830 u_int fatal, oenfatal;
831
832 fatal = 0;
833 sc = arg;
834 dev = sc->sc_dev;
835 mtx_lock_spin(&sc->sc_pcib_mtx);
836 mcstat = FIRE_PCI_READ_8(sc, FO_PCI_MULTI_CORE_ERR_STAT);
837 if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_DMC) != 0) {
838 dmcstat = FIRE_PCI_READ_8(sc, FO_PCI_DMC_CORE_BLOCK_ERR_STAT);
839 if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_IMU) != 0) {
840 imustat = FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_STAT);
841 device_printf(dev, "IMU error %#llx\n",
842 (unsigned long long)imustat);
843 if ((imustat &
844 FO_PCI_IMU_ERR_INT_EQ_NOT_EN_P) != 0) {
845 fatal = 1;
846 val = FIRE_PCI_READ_8(sc,
847 FO_PCI_IMU_SCS_ERR_LOG);
848 device_printf(dev, "SCS error log %#llx\n",
849 (unsigned long long)val);
850 }
851 if ((imustat & FO_PCI_IMU_ERR_INT_EQ_OVER_P) != 0) {
852 fatal = 1;
853 val = FIRE_PCI_READ_8(sc,
854 FO_PCI_IMU_EQS_ERR_LOG);
855 device_printf(dev, "EQS error log %#llx\n",
856 (unsigned long long)val);
857 }
858 if ((imustat & (FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_P |
859 FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_P |
860 FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_P |
861 FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_P |
862 FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
863 FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
864 FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P |
865 FO_PCI_IMU_ERR_INT_MSI_NOT_EN_P)) != 0) {
866 fatal = 1;
867 val = FIRE_PCI_READ_8(sc,
868 FO_PCI_IMU_RDS_ERR_LOG);
869 device_printf(dev, "RDS error log %#llx\n",
870 (unsigned long long)val);
871 }
872 }
873 if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_MMU) != 0) {
874 fatal = 1;
875 mmustat = FIRE_PCI_READ_8(sc, FO_PCI_MMU_INT_STAT);
876 mmutfar = FIRE_PCI_READ_8(sc,
877 FO_PCI_MMU_TRANS_FAULT_ADDR);
878 mmutfsr = FIRE_PCI_READ_8(sc,
879 FO_PCI_MMU_TRANS_FAULT_STAT);
880 if ((mmustat & (FO_PCI_MMU_ERR_INT_TBW_DPE_P |
881 FO_PCI_MMU_ERR_INT_TBW_ERR_P |
882 FO_PCI_MMU_ERR_INT_TBW_UDE_P |
883 FO_PCI_MMU_ERR_INT_TBW_DME_P |
884 FO_PCI_MMU_ERR_INT_TTC_CAE_P |
885 FIRE_PCI_MMU_ERR_INT_TTC_DPE_P |
886 OBERON_PCI_MMU_ERR_INT_TTC_DUE_P |
887 FO_PCI_MMU_ERR_INT_TRN_ERR_P)) != 0)
888 fatal = 1;
889 else {
890 sc->sc_stats_mmu_err++;
891 FIRE_PCI_WRITE_8(sc, FO_PCI_MMU_ERR_STAT_CLR,
892 mmustat);
893 }
894 device_printf(dev,
895 "MMU error %#llx: TFAR %#llx TFSR %#llx\n",
896 (unsigned long long)mmustat,
897 (unsigned long long)mmutfar,
898 (unsigned long long)mmutfsr);
899 }
900 }
901 if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_PEC) != 0) {
902 pecstat = FIRE_PCI_READ_8(sc, FO_PCI_PEC_CORE_BLOCK_INT_STAT);
903 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_UERR) != 0) {
904 fatal = 1;
905 uestat = FIRE_PCI_READ_8(sc,
906 FO_PCI_TLU_UERR_INT_STAT);
907 device_printf(dev,
908 "DLU/TLU uncorrectable error %#llx\n",
909 (unsigned long long)uestat);
910 if ((uestat & (FO_PCI_TLU_UERR_INT_UR_P |
911 OBERON_PCI_TLU_UERR_INT_POIS_P |
912 FO_PCI_TLU_UERR_INT_MFP_P |
913 FO_PCI_TLU_UERR_INT_ROF_P |
914 FO_PCI_TLU_UERR_INT_UC_P |
915 FIRE_PCI_TLU_UERR_INT_PP_P |
916 OBERON_PCI_TLU_UERR_INT_POIS_P)) != 0) {
917 val = FIRE_PCI_READ_8(sc,
918 FO_PCI_TLU_RX_UERR_HDR1_LOG);
919 device_printf(dev,
920 "receive header log %#llx\n",
921 (unsigned long long)val);
922 val = FIRE_PCI_READ_8(sc,
923 FO_PCI_TLU_RX_UERR_HDR2_LOG);
924 device_printf(dev,
925 "receive header log 2 %#llx\n",
926 (unsigned long long)val);
927 }
928 if ((uestat & FO_PCI_TLU_UERR_INT_CTO_P) != 0) {
929 val = FIRE_PCI_READ_8(sc,
930 FO_PCI_TLU_TX_UERR_HDR1_LOG);
931 device_printf(dev,
932 "transmit header log %#llx\n",
933 (unsigned long long)val);
934 val = FIRE_PCI_READ_8(sc,
935 FO_PCI_TLU_TX_UERR_HDR2_LOG);
936 device_printf(dev,
937 "transmit header log 2 %#llx\n",
938 (unsigned long long)val);
939 }
940 if ((uestat & FO_PCI_TLU_UERR_INT_DLP_P) != 0) {
941 val = FIRE_PCI_READ_8(sc,
942 FO_PCI_LPU_LNK_LYR_INT_STAT);
943 device_printf(dev,
944 "link layer interrupt and status %#llx\n",
945 (unsigned long long)val);
946 }
947 if ((uestat & FO_PCI_TLU_UERR_INT_TE_P) != 0) {
948 val = FIRE_PCI_READ_8(sc,
949 FO_PCI_LPU_PHY_LYR_INT_STAT);
950 device_printf(dev,
951 "phy layer interrupt and status %#llx\n",
952 (unsigned long long)val);
953 }
954 }
955 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_CERR) != 0) {
956 sc->sc_stats_tlu_ce++;
957 cestat = FIRE_PCI_READ_8(sc,
958 FO_PCI_TLU_CERR_INT_STAT);
959 device_printf(dev,
960 "DLU/TLU correctable error %#llx\n",
961 (unsigned long long)cestat);
962 val = FIRE_PCI_READ_8(sc,
963 FO_PCI_LPU_LNK_LYR_INT_STAT);
964 device_printf(dev,
965 "link layer interrupt and status %#llx\n",
966 (unsigned long long)val);
967 if ((cestat & FO_PCI_TLU_CERR_INT_RE_P) != 0) {
968 FIRE_PCI_WRITE_8(sc,
969 FO_PCI_LPU_LNK_LYR_INT_STAT, val);
970 val = FIRE_PCI_READ_8(sc,
971 FO_PCI_LPU_PHY_LYR_INT_STAT);
972 device_printf(dev,
973 "phy layer interrupt and status %#llx\n",
974 (unsigned long long)val);
975 }
976 FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_CERR_STAT_CLR,
977 cestat);
978 }
979 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_OEVENT) != 0) {
980 oenfatal = 0;
981 oestat = FIRE_PCI_READ_8(sc,
982 FO_PCI_TLU_OEVENT_INT_STAT);
983 device_printf(dev, "DLU/TLU other event %#llx\n",
984 (unsigned long long)oestat);
985 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
986 FO_PCI_TLU_OEVENT_MRC_P |
987 FO_PCI_TLU_OEVENT_WUC_P |
988 FO_PCI_TLU_OEVENT_RUC_P |
989 FO_PCI_TLU_OEVENT_CRS_P)) != 0) {
990 val = FIRE_PCI_READ_8(sc,
991 FO_PCI_TLU_RX_OEVENT_HDR1_LOG);
992 device_printf(dev,
993 "receive header log %#llx\n",
994 (unsigned long long)val);
995 val = FIRE_PCI_READ_8(sc,
996 FO_PCI_TLU_RX_OEVENT_HDR2_LOG);
997 device_printf(dev,
998 "receive header log 2 %#llx\n",
999 (unsigned long long)val);
1000 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1001 FO_PCI_TLU_OEVENT_MRC_P |
1002 FO_PCI_TLU_OEVENT_WUC_P |
1003 FO_PCI_TLU_OEVENT_RUC_P)) != 0)
1004 fatal = 1;
1005 else {
1006 sc->sc_stats_tlu_oe_rx_err++;
1007 oenfatal = 1;
1008 }
1009 }
1010 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1011 FO_PCI_TLU_OEVENT_CTO_P |
1012 FO_PCI_TLU_OEVENT_WUC_P |
1013 FO_PCI_TLU_OEVENT_RUC_P)) != 0) {
1014 val = FIRE_PCI_READ_8(sc,
1015 FO_PCI_TLU_TX_OEVENT_HDR1_LOG);
1016 device_printf(dev,
1017 "transmit header log %#llx\n",
1018 (unsigned long long)val);
1019 val = FIRE_PCI_READ_8(sc,
1020 FO_PCI_TLU_TX_OEVENT_HDR2_LOG);
1021 device_printf(dev,
1022 "transmit header log 2 %#llx\n",
1023 (unsigned long long)val);
1024 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1025 FO_PCI_TLU_OEVENT_CTO_P |
1026 FO_PCI_TLU_OEVENT_WUC_P |
1027 FO_PCI_TLU_OEVENT_RUC_P)) != 0)
1028 fatal = 1;
1029 else {
1030 sc->sc_stats_tlu_oe_tx_err++;
1031 oenfatal = 1;
1032 }
1033 }
1034 if ((oestat & (FO_PCI_TLU_OEVENT_ERO_P |
1035 FO_PCI_TLU_OEVENT_EMP_P |
1036 FO_PCI_TLU_OEVENT_EPE_P |
1037 FIRE_PCI_TLU_OEVENT_ERP_P |
1038 OBERON_PCI_TLU_OEVENT_ERBU_P |
1039 FIRE_PCI_TLU_OEVENT_EIP_P |
1040 OBERON_PCI_TLU_OEVENT_EIUE_P)) != 0) {
1041 fatal = 1;
1042 val = FIRE_PCI_READ_8(sc,
1043 FO_PCI_LPU_LNK_LYR_INT_STAT);
1044 device_printf(dev,
1045 "link layer interrupt and status %#llx\n",
1046 (unsigned long long)val);
1047 }
1048 if ((oestat & (FO_PCI_TLU_OEVENT_IIP_P |
1049 FO_PCI_TLU_OEVENT_EDP_P |
1050 FIRE_PCI_TLU_OEVENT_EHP_P |
1051 OBERON_PCI_TLU_OEVENT_TLUEITMO_S |
1052 FO_PCI_TLU_OEVENT_ERU_P)) != 0)
1053 fatal = 1;
1054 if ((oestat & (FO_PCI_TLU_OEVENT_NFP_P |
1055 FO_PCI_TLU_OEVENT_LWC_P |
1056 FO_PCI_TLU_OEVENT_LIN_P |
1057 FO_PCI_TLU_OEVENT_LRS_P |
1058 FO_PCI_TLU_OEVENT_LDN_P |
1059 FO_PCI_TLU_OEVENT_LUP_P)) != 0)
1060 oenfatal = 1;
1061 if (oenfatal != 0) {
1062 sc->sc_stats_tlu_oe_non_fatal++;
1063 FIRE_PCI_WRITE_8(sc,
1064 FO_PCI_TLU_OEVENT_STAT_CLR, oestat);
1065 if ((oestat & FO_PCI_TLU_OEVENT_LIN_P) != 0)
1066 FIRE_PCI_WRITE_8(sc,
1067 FO_PCI_LPU_LNK_LYR_INT_STAT,
1068 FIRE_PCI_READ_8(sc,
1069 FO_PCI_LPU_LNK_LYR_INT_STAT));
1070 }
1071 }
1072 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_ILU) != 0) {
1073 ilustat = FIRE_PCI_READ_8(sc, FO_PCI_ILU_INT_STAT);
1074 device_printf(dev, "ILU error %#llx\n",
1075 (unsigned long long)ilustat);
1076 if ((ilustat &
1077 FIRE_PCI_ILU_ERR_INT_IHB_PE_P) != 0)
1078 fatal = 1;
1079 else {
1080 sc->sc_stats_ilu_err++;
1081 FIRE_PCI_WRITE_8(sc, FO_PCI_ILU_INT_STAT,
1082 ilustat);
1083 }
1084 }
1085 }
1086 mtx_unlock_spin(&sc->sc_pcib_mtx);
1087 if (fatal != 0)
1088 panic("%s: fatal DMC/PEC error",
1089 device_get_nameunit(sc->sc_dev));
1090 return (FILTER_HANDLED);
1091 }
1092
1093 static int
1094 fire_xcb(void *arg)
1095 {
1096 struct fire_softc *sc;
1097 device_t dev;
1098 uint64_t errstat, intstat, val;
1099 u_int fatal;
1100
1101 fatal = 0;
1102 sc = arg;
1103 dev = sc->sc_dev;
1104 mtx_lock_spin(&sc->sc_pcib_mtx);
1105 if (sc->sc_mode == FIRE_MODE_OBERON) {
1106 intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
1107 device_printf(dev, "UBC error: interrupt status %#llx\n",
1108 (unsigned long long)intstat);
1109 if ((intstat & ~(OBERON_UBC_ERR_INT_DMARDUEB_P |
1110 OBERON_UBC_ERR_INT_DMARDUEA_P)) != 0)
1111 fatal = 1;
1112 else
1113 sc->sc_stats_ubc_dmardue++;
1114 if (fatal != 0) {
1115 mtx_unlock_spin(&sc->sc_pcib_mtx);
1116 panic("%s: fatal UBC core block error",
1117 device_get_nameunit(sc->sc_dev));
1118 } else {
1119 FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
1120 mtx_unlock_spin(&sc->sc_pcib_mtx);
1121 }
1122 } else {
1123 errstat = FIRE_CTRL_READ_8(sc, FIRE_JBC_CORE_BLOCK_ERR_STAT);
1124 if ((errstat & (FIRE_JBC_CORE_BLOCK_ERR_STAT_MERGE |
1125 FIRE_JBC_CORE_BLOCK_ERR_STAT_JBCINT |
1126 FIRE_JBC_CORE_BLOCK_ERR_STAT_DMCINT)) != 0) {
1127 intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
1128 device_printf(dev, "JBC interrupt status %#llx\n",
1129 (unsigned long long)intstat);
1130 if ((intstat & FIRE_JBC_ERR_INT_EBUS_TO_P) != 0) {
1131 val = FIRE_CTRL_READ_8(sc,
1132 FIRE_JBC_CSR_ERR_LOG);
1133 device_printf(dev, "CSR error log %#llx\n",
1134 (unsigned long long)val);
1135 }
1136 if ((intstat & (FIRE_JBC_ERR_INT_UNSOL_RD_P |
1137 FIRE_JBC_ERR_INT_UNSOL_INT_P)) != 0) {
1138 if ((intstat &
1139 FIRE_JBC_ERR_INT_UNSOL_RD_P) != 0)
1140 sc->sc_stats_jbc_unsol_rd++;
1141 if ((intstat &
1142 FIRE_JBC_ERR_INT_UNSOL_INT_P) != 0)
1143 sc->sc_stats_jbc_unsol_int++;
1144 val = FIRE_CTRL_READ_8(sc,
1145 FIRE_DMCINT_IDC_ERR_LOG);
1146 device_printf(dev,
1147 "DMCINT IDC error log %#llx\n",
1148 (unsigned long long)val);
1149 }
1150 if ((intstat & (FIRE_JBC_ERR_INT_MB_PER_P |
1151 FIRE_JBC_ERR_INT_MB_PEW_P)) != 0) {
1152 fatal = 1;
1153 val = FIRE_CTRL_READ_8(sc,
1154 FIRE_MERGE_TRANS_ERR_LOG);
1155 device_printf(dev,
1156 "merge transaction error log %#llx\n",
1157 (unsigned long long)val);
1158 }
1159 if ((intstat & FIRE_JBC_ERR_INT_IJP_P) != 0) {
1160 fatal = 1;
1161 val = FIRE_CTRL_READ_8(sc,
1162 FIRE_JBCINT_OTRANS_ERR_LOG);
1163 device_printf(dev,
1164 "JBCINT out transaction error log "
1165 "%#llx\n", (unsigned long long)val);
1166 val = FIRE_CTRL_READ_8(sc,
1167 FIRE_JBCINT_OTRANS_ERR_LOG2);
1168 device_printf(dev,
1169 "JBCINT out transaction error log 2 "
1170 "%#llx\n", (unsigned long long)val);
1171 }
1172 if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
1173 FIRE_JBC_ERR_INT_CE_ASYN_P |
1174 FIRE_JBC_ERR_INT_JTE_P | FIRE_JBC_ERR_INT_JBE_P |
1175 FIRE_JBC_ERR_INT_JUE_P |
1176 FIRE_JBC_ERR_INT_ICISE_P |
1177 FIRE_JBC_ERR_INT_WR_DPE_P |
1178 FIRE_JBC_ERR_INT_RD_DPE_P |
1179 FIRE_JBC_ERR_INT_ILL_BMW_P |
1180 FIRE_JBC_ERR_INT_ILL_BMR_P |
1181 FIRE_JBC_ERR_INT_BJC_P)) != 0) {
1182 if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
1183 FIRE_JBC_ERR_INT_JTE_P |
1184 FIRE_JBC_ERR_INT_JBE_P |
1185 FIRE_JBC_ERR_INT_JUE_P |
1186 FIRE_JBC_ERR_INT_ICISE_P |
1187 FIRE_JBC_ERR_INT_WR_DPE_P |
1188 FIRE_JBC_ERR_INT_RD_DPE_P |
1189 FIRE_JBC_ERR_INT_ILL_BMW_P |
1190 FIRE_JBC_ERR_INT_ILL_BMR_P |
1191 FIRE_JBC_ERR_INT_BJC_P)) != 0)
1192 fatal = 1;
1193 else
1194 sc->sc_stats_jbc_ce_async++;
1195 val = FIRE_CTRL_READ_8(sc,
1196 FIRE_JBCINT_ITRANS_ERR_LOG);
1197 device_printf(dev,
1198 "JBCINT in transaction error log %#llx\n",
1199 (unsigned long long)val);
1200 val = FIRE_CTRL_READ_8(sc,
1201 FIRE_JBCINT_ITRANS_ERR_LOG2);
1202 device_printf(dev,
1203 "JBCINT in transaction error log 2 "
1204 "%#llx\n", (unsigned long long)val);
1205 }
1206 if ((intstat & (FIRE_JBC_ERR_INT_PIO_UNMAP_RD_P |
1207 FIRE_JBC_ERR_INT_ILL_ACC_RD_P |
1208 FIRE_JBC_ERR_INT_PIO_UNMAP_P |
1209 FIRE_JBC_ERR_INT_PIO_DPE_P |
1210 FIRE_JBC_ERR_INT_PIO_CPE_P |
1211 FIRE_JBC_ERR_INT_ILL_ACC_P)) != 0) {
1212 fatal = 1;
1213 val = FIRE_CTRL_READ_8(sc,
1214 FIRE_JBC_CSR_ERR_LOG);
1215 device_printf(dev,
1216 "DMCINT ODCD error log %#llx\n",
1217 (unsigned long long)val);
1218 }
1219 if ((intstat & (FIRE_JBC_ERR_INT_MB_PEA_P |
1220 FIRE_JBC_ERR_INT_CPE_P | FIRE_JBC_ERR_INT_APE_P |
1221 FIRE_JBC_ERR_INT_PIO_CPE_P |
1222 FIRE_JBC_ERR_INT_JTCEEW_P |
1223 FIRE_JBC_ERR_INT_JTCEEI_P |
1224 FIRE_JBC_ERR_INT_JTCEER_P)) != 0) {
1225 fatal = 1;
1226 val = FIRE_CTRL_READ_8(sc,
1227 FIRE_FATAL_ERR_LOG);
1228 device_printf(dev, "fatal error log %#llx\n",
1229 (unsigned long long)val);
1230 val = FIRE_CTRL_READ_8(sc,
1231 FIRE_FATAL_ERR_LOG2);
1232 device_printf(dev, "fatal error log 2 "
1233 "%#llx\n", (unsigned long long)val);
1234 }
1235 if (fatal != 0) {
1236 mtx_unlock_spin(&sc->sc_pcib_mtx);
1237 panic("%s: fatal JBC core block error",
1238 device_get_nameunit(sc->sc_dev));
1239 } else {
1240 FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
1241 mtx_unlock_spin(&sc->sc_pcib_mtx);
1242 }
1243 } else {
1244 mtx_unlock_spin(&sc->sc_pcib_mtx);
1245 panic("%s: unknown JCB core block error status %#llx",
1246 device_get_nameunit(sc->sc_dev),
1247 (unsigned long long)errstat);
1248 }
1249 }
1250 return (FILTER_HANDLED);
1251 }
1252
1253 static int
1254 fire_pcie(void *arg)
1255 {
1256 struct fire_msiqarg *fmqa;
1257 struct fire_softc *sc;
1258 struct fo_msiq_record *qrec;
1259 device_t dev;
1260 uint64_t word0;
1261 u_int head, msg, msiq;
1262
1263 fmqa = arg;
1264 sc = fmqa->fmqa_fica.fica_sc;
1265 dev = sc->sc_dev;
1266 msiq = fmqa->fmqa_msiq;
1267 mtx_lock_spin(&fmqa->fmqa_mtx);
1268 head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
1269 FO_PCI_EQ_HD_SHFT;
1270 qrec = &fmqa->fmqa_base[head];
1271 word0 = qrec->fomqr_word0;
1272 for (;;) {
1273 KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSG) != 0,
1274 ("%s: received non-PCIe message in event queue %d "
1275 "(word0 %#llx)", device_get_nameunit(dev), msiq,
1276 (unsigned long long)word0));
1277 msg = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
1278 FO_MQR_WORD0_DATA0_SHFT;
1279
1280 #define PCIE_MSG_CODE_ERR_COR 0x30
1281 #define PCIE_MSG_CODE_ERR_NONFATAL 0x31
1282 #define PCIE_MSG_CODE_ERR_FATAL 0x33
1283
1284 if (msg == PCIE_MSG_CODE_ERR_COR)
1285 device_printf(dev, "correctable PCIe error\n");
1286 else if (msg == PCIE_MSG_CODE_ERR_NONFATAL ||
1287 msg == PCIE_MSG_CODE_ERR_FATAL)
1288 panic("%s: %sfatal PCIe error",
1289 device_get_nameunit(dev),
1290 msg == PCIE_MSG_CODE_ERR_NONFATAL ? "non-" : "");
1291 else
1292 panic("%s: received unknown PCIe message %#x",
1293 device_get_nameunit(dev), msg);
1294 qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
1295 head = (head + 1) % sc->sc_msiq_size;
1296 qrec = &fmqa->fmqa_base[head];
1297 word0 = qrec->fomqr_word0;
1298 if (__predict_true((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
1299 break;
1300 }
1301 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
1302 FO_PCI_EQ_HD_SHFT);
1303 if ((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
1304 FO_PCI_EQ_TL_OVERR) != 0) {
1305 device_printf(dev, "event queue %d overflow\n", msiq);
1306 msiq <<= 3;
1307 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1308 FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
1309 FO_PCI_EQ_CTRL_CLR_COVERR);
1310 }
1311 mtx_unlock_spin(&fmqa->fmqa_mtx);
1312 return (FILTER_HANDLED);
1313 }
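
/*
 * The loop above (and fire_msiq_common() below) follows one consumer
 * pattern, restated here as a minimal sketch: the FMT/TYPE field of a
 * record doubles as its valid bit, so the consumer clears it, advances
 * the head modulo the queue size and publishes the new head register.
 */
#if 0
	head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
	    FO_PCI_EQ_HD_SHFT;
	while ((fmqa->fmqa_base[head].fomqr_word0 &
	    FO_MQR_WORD0_FMT_TYPE_MASK) != 0) {
		/* ... handle fmqa->fmqa_base[head] ... */
		fmqa->fmqa_base[head].fomqr_word0 &=
		    ~FO_MQR_WORD0_FMT_TYPE_MASK;	/* mark consumed */
		head = (head + 1) % sc->sc_msiq_size;
	}
	FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head,
	    (head & FO_PCI_EQ_HD_MASK) << FO_PCI_EQ_HD_SHFT);
#endif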
1314
1315 static int
1316 fire_maxslots(device_t dev)
1317 {
1318
1319 return (1);
1320 }
1321
1322 static uint32_t
1323 fire_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
1324 int width)
1325 {
1326
1327 return (ofw_pci_read_config_common(dev, PCIE_REGMAX, FO_CONF_OFF(bus,
1328 slot, func, reg), bus, slot, func, reg, width));
1329 }
1330
1331 static void
1332 fire_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
1333 uint32_t val, int width)
1334 {
1335
1336 ofw_pci_write_config_common(dev, PCIE_REGMAX, FO_CONF_OFF(bus, slot,
1337 func, reg), bus, slot, func, reg, val, width);
1338 }
1339
1340 static int
1341 fire_route_interrupt(device_t bridge, device_t dev, int pin)
1342 {
1343 ofw_pci_intr_t mintr;
1344
1345 mintr = ofw_pci_route_interrupt_common(bridge, dev, pin);
1346 if (!PCI_INTERRUPT_VALID(mintr))
1347 device_printf(bridge,
1348 "could not route pin %d for device %d.%d\n",
1349 pin, pci_get_slot(dev), pci_get_function(dev));
1350 return (mintr);
1351 }
1352
1353 static void
1354 fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
1355 bus_dmasync_op_t op)
1356 {
1357
1358 if ((map->dm_flags & DMF_LOADED) == 0)
1359 return;
1360
1361 if ((op & BUS_DMASYNC_POSTREAD) != 0)
1362 ofw_pci_dmamap_sync_stst_order_common();
1363 else if ((op & BUS_DMASYNC_PREWRITE) != 0)
1364 membar(Sync);
1365 }
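
/*
 * A minimal sketch of the ordering the PREWRITE case provides to
 * consumers; the descriptor layout and doorbell register (DESC_READY,
 * FOO_DOORBELL) are hypothetical:
 */
#if 0
	desc->flags = DESC_READY;	/* store to DMA-visible memory */
	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
	bus_write_4(res, FOO_DOORBELL, 1);	/* device may now fetch */
#endif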
1366
1367 static void
1368 fire_intr_enable(void *arg)
1369 {
1370 struct intr_vector *iv;
1371 struct fire_icarg *fica;
1372 struct fire_softc *sc;
1373 struct pcpu *pc;
1374 uint64_t mr;
1375 u_int ctrl, i;
1376
1377 iv = arg;
1378 fica = iv->iv_icarg;
1379 sc = fica->fica_sc;
1380 mr = FO_PCI_IMAP_V;
1381 if (sc->sc_mode == FIRE_MODE_OBERON)
1382 mr |= (iv->iv_mid << OBERON_PCI_IMAP_T_DESTID_SHFT) &
1383 OBERON_PCI_IMAP_T_DESTID_MASK;
1384 else
1385 mr |= (iv->iv_mid << FIRE_PCI_IMAP_T_JPID_SHFT) &
1386 FIRE_PCI_IMAP_T_JPID_MASK;
1387 /*
1388 * Given that all mondos for the same target are required to use the
1389 * same interrupt controller, we simply use the CPU ID to index the
1390 * latter.
1391 */
1392 ctrl = 0;
1393 for (i = 0; i < mp_ncpus; ++i) {
1394 pc = pcpu_find(i);
1395 if (pc == NULL || iv->iv_mid != pc->pc_mid)
1396 continue;
1397 ctrl = pc->pc_cpuid % 4;
1398 break;
1399 }
1400 mr |= (1ULL << ctrl) << FO_PCI_IMAP_INT_CTRL_NUM_SHFT &
1401 FO_PCI_IMAP_INT_CTRL_NUM_MASK;
1402 FIRE_PCI_WRITE_8(sc, fica->fica_map, mr);
1403 }
1404
1405 static void
1406 fire_intr_disable(void *arg)
1407 {
1408 struct intr_vector *iv;
1409 struct fire_icarg *fica;
1410 struct fire_softc *sc;
1411
1412 iv = arg;
1413 fica = iv->iv_icarg;
1414 sc = fica->fica_sc;
1415 FIRE_PCI_WRITE_8(sc, fica->fica_map,
1416 FIRE_PCI_READ_8(sc, fica->fica_map) & ~FO_PCI_IMAP_V);
1417 }
1418
1419 static void
1420 fire_intr_assign(void *arg)
1421 {
1422 struct intr_vector *iv;
1423 struct fire_icarg *fica;
1424 struct fire_softc *sc;
1425 uint64_t mr;
1426
1427 iv = arg;
1428 fica = iv->iv_icarg;
1429 sc = fica->fica_sc;
1430 mr = FIRE_PCI_READ_8(sc, fica->fica_map);
1431 if ((mr & FO_PCI_IMAP_V) != 0) {
1432 FIRE_PCI_WRITE_8(sc, fica->fica_map, mr & ~FO_PCI_IMAP_V);
1433 FIRE_PCI_BARRIER(sc, fica->fica_map, 8,
1434 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1435 }
1436 while (FIRE_PCI_READ_8(sc, fica->fica_clr) != INTCLR_IDLE)
1437 ;
1438 if ((mr & FO_PCI_IMAP_V) != 0)
1439 fire_intr_enable(arg);
1440 }
1441
1442 static void
1443 fire_intr_clear(void *arg)
1444 {
1445 struct intr_vector *iv;
1446 struct fire_icarg *fica;
1447
1448 iv = arg;
1449 fica = iv->iv_icarg;
1450 FIRE_PCI_WRITE_8(fica->fica_sc, fica->fica_clr, INTCLR_IDLE);
1451 }
1452
1453 /*
1454 * Given that the event queue implementation fits our current MD and MI
1455 * interrupt frameworks like a square peg fits a round hole, we are generous
1456 * and use one event queue per MSI for now, which limits us to 35 MSIs/MSI-Xs
1457 * per Host-PCIe-bridge (we use one event queue for the PCIe error messages).
1458 * This seems tolerable as long as most devices just use one MSI/MSI-X anyway.
1459 * Adding knowledge about MSIs/MSI-Xs to the MD interrupt code should allow us
1460 * to decouple the 1:1 mapping at the cost of no longer being able to bind
1461 * MSIs/MSI-Xs to specific CPUs as we currently have no reliable way to
1462 * quiesce a device while we move its MSIs/MSI-Xs to another event queue.
1463 */
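
/*
 * From a child driver's point of view the 1:1 mapping is invisible; a
 * minimal consumer-side sketch using standard pci(9) calls (error
 * handling elided, the resource variable names are stand-ins):
 */
#if 0
	int count = 1, rid;
	struct resource *irq_res;

	if (pci_alloc_msi(dev, &count) == 0) {
		rid = 1;		/* the first MSI resource is rid 1 */
		irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_ACTIVE);
		/* bus_setup_intr() then ends up in fire_setup_intr(). */
	}
#endif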
1464
1465 static int
1466 fire_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused,
1467 int *irqs)
1468 {
1469 struct fire_softc *sc;
1470 u_int i, j, msiqrun;
1471
1472 if (powerof2(count) == 0 || count > 32)
1473 return (EINVAL);
1474
1475 sc = device_get_softc(dev);
1476 mtx_lock(&sc->sc_msi_mtx);
1477 msiqrun = 0;
1478 for (i = 0; i < sc->sc_msiq_count; i++) {
1479 for (j = i; j < i + count; j++) {
1480 if (isclr(sc->sc_msiq_bitmap, j) == 0)
1481 break;
1482 }
1483 if (j == i + count) {
1484 msiqrun = i;
1485 break;
1486 }
1487 }
1488 if (i == sc->sc_msiq_count) {
1489 mtx_unlock(&sc->sc_msi_mtx);
1490 return (ENXIO);
1491 }
1492 for (i = 0; i + count < sc->sc_msi_count; i += count) {
1493 for (j = i; j < i + count; j++)
1494 if (isclr(sc->sc_msi_bitmap, j) == 0)
1495 break;
1496 if (j == i + count) {
1497 for (j = 0; j < count; j++) {
1498 setbit(sc->sc_msiq_bitmap, msiqrun + j);
1499 setbit(sc->sc_msi_bitmap, i + j);
1500 sc->sc_msi_msiq_table[i + j] = msiqrun + j;
1501 irqs[j] = sc->sc_msi_first + i + j;
1502 }
1503 mtx_unlock(&sc->sc_msi_mtx);
1504 return (0);
1505 }
1506 }
1507 mtx_unlock(&sc->sc_msi_mtx);
1508 return (ENXIO);
1509 }
1510
1511 static int
1512 fire_release_msi(device_t dev, device_t child, int count, int *irqs)
1513 {
1514 struct fire_softc *sc;
1515 u_int i;
1516
1517 sc = device_get_softc(dev);
1518 mtx_lock(&sc->sc_msi_mtx);
1519 for (i = 0; i < count; i++) {
1520 clrbit(sc->sc_msiq_bitmap,
1521 sc->sc_msi_msiq_table[irqs[i] - sc->sc_msi_first]);
1522 clrbit(sc->sc_msi_bitmap, irqs[i] - sc->sc_msi_first);
1523 }
1524 mtx_unlock(&sc->sc_msi_mtx);
1525 return (0);
1526 }
1527
1528 static int
1529 fire_alloc_msix(device_t dev, device_t child, int *irq)
1530 {
1531 struct fire_softc *sc;
1532 int i, msiq;
1533
1534 sc = device_get_softc(dev);
1535 if ((sc->sc_flags & FIRE_MSIX) == 0)
1536 return (ENXIO);
1537 mtx_lock(&sc->sc_msi_mtx);
1538 msiq = 0;
1539 for (i = 0; i < sc->sc_msiq_count; i++) {
1540 if (isclr(sc->sc_msiq_bitmap, i) != 0) {
1541 msiq = i;
1542 break;
1543 }
1544 }
1545 if (i == sc->sc_msiq_count) {
1546 mtx_unlock(&sc->sc_msi_mtx);
1547 return (ENXIO);
1548 }
1549 for (i = sc->sc_msi_count - 1; i >= 0; i--) {
1550 if (isclr(sc->sc_msi_bitmap, i) != 0) {
1551 setbit(sc->sc_msiq_bitmap, msiq);
1552 setbit(sc->sc_msi_bitmap, i);
1553 sc->sc_msi_msiq_table[i] = msiq;
1554 *irq = sc->sc_msi_first + i;
1555 mtx_unlock(&sc->sc_msi_mtx);
1556 return (0);
1557 }
1558 }
1559 mtx_unlock(&sc->sc_msi_mtx);
1560 return (ENXIO);
1561 }
1562
1563 static int
1564 fire_release_msix(device_t dev, device_t child, int irq)
1565 {
1566 struct fire_softc *sc;
1567
1568 sc = device_get_softc(dev);
1569 if ((sc->sc_flags & FIRE_MSIX) == 0)
1570 return (ENXIO);
1571 mtx_lock(&sc->sc_msi_mtx);
1572 clrbit(sc->sc_msiq_bitmap,
1573 sc->sc_msi_msiq_table[irq - sc->sc_msi_first]);
1574 clrbit(sc->sc_msi_bitmap, irq - sc->sc_msi_first);
1575 mtx_unlock(&sc->sc_msi_mtx);
1576 return (0);
1577 }
1578
1579 static int
1580 fire_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
1581 uint32_t *data)
1582 {
1583 struct fire_softc *sc;
1584 struct pci_devinfo *dinfo;
1585
1586 sc = device_get_softc(dev);
1587 dinfo = device_get_ivars(child);
1588 if (dinfo->cfg.msi.msi_alloc > 0) {
1589 if ((irq & ~sc->sc_msi_data_mask) != 0) {
1590 device_printf(dev, "invalid MSI 0x%x\n", irq);
1591 return (EINVAL);
1592 }
1593 } else {
1594 if ((sc->sc_flags & FIRE_MSIX) == 0)
1595 return (ENXIO);
1596 if (fls(irq) > sc->sc_msix_data_width) {
1597 device_printf(dev, "invalid MSI-X 0x%x\n", irq);
1598 return (EINVAL);
1599 }
1600 }
1601 if (dinfo->cfg.msi.msi_alloc > 0 &&
1602 (dinfo->cfg.msi.msi_ctrl & PCIM_MSICTRL_64BIT) == 0)
1603 *addr = sc->sc_msi_addr32;
1604 else
1605 *addr = sc->sc_msi_addr64;
1606 *data = irq;
1607 return (0);
1608 }
1609
1610 static void
1611 fire_msiq_handler(void *cookie)
1612 {
1613 struct intr_vector *iv;
1614 struct fire_msiqarg *fmqa;
1615
1616 iv = cookie;
1617 fmqa = iv->iv_icarg;
1618 /*
1619 * Note that since fire_intr_clear() will clear the event queue
1620 * interrupt after the handler associated with the MSI [sic] has
1621 * been executed we have to protect the access to the event queue as
1622 * otherwise nested event queue interrupts cause corruption of the
1623 * event queue on MP machines. Obviously especially when abandoning
1624 * the 1:1 mapping it would be better to not clear the event queue
1625 * interrupt after each handler invocation but only once when the
1626 * outstanding MSIs have been processed but unfortunately that
1627 * doesn't work well and leads to interrupt storms with controllers/
1628 * drivers which don't mask interrupts while the handler is executed.
1629 * Maybe delaying clearing the MSI until after the handler has been
1630 * executed could be used to work around this but that's not the
1631 * intended usage and might in turn cause lost MSIs.
1632 */
1633 mtx_lock_spin(&fmqa->fmqa_mtx);
1634 fire_msiq_common(iv, fmqa);
1635 mtx_unlock_spin(&fmqa->fmqa_mtx);
1636 }
1637
1638 static void
1639 fire_msiq_filter(void *cookie)
1640 {
1641 struct intr_vector *iv;
1642 struct fire_msiqarg *fmqa;
1643
1644 iv = cookie;
1645 fmqa = iv->iv_icarg;
1646 /*
1647 * For filters we don't use fire_intr_clear() since it would clear
1648 * the event queue interrupt while we're still processing the event
1649 * queue as filters and associated post-filter handler are executed
1650 * directly, which in turn would lead to lost MSIs. So we clear the
1651 * event queue interrupt only once after processing the event queue.
1652 * Given that this still guarantees the filters to not be executed
1653 * concurrently and no other CPU can clear the event queue interrupt
1654 * while the event queue is still processed, we don't even need to
1655 * interlock the access to the event queue in this case.
1656 */
1657 critical_enter();
1658 fire_msiq_common(iv, fmqa);
1659 FIRE_PCI_WRITE_8(fmqa->fmqa_fica.fica_sc, fmqa->fmqa_fica.fica_clr,
1660 INTCLR_IDLE);
1661 critical_exit();
1662 }
1663
1664 static inline void
1665 fire_msiq_common(struct intr_vector *iv, struct fire_msiqarg *fmqa)
1666 {
1667 struct fire_softc *sc;
1668 struct fo_msiq_record *qrec;
1669 device_t dev;
1670 uint64_t word0;
1671 u_int head, msi, msiq;
1672
1673 sc = fmqa->fmqa_fica.fica_sc;
1674 dev = sc->sc_dev;
1675 msiq = fmqa->fmqa_msiq;
1676 head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
1677 FO_PCI_EQ_HD_SHFT;
1678 qrec = &fmqa->fmqa_base[head];
1679 word0 = qrec->fomqr_word0;
1680 for (;;) {
1681 if (__predict_false((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
1682 break;
1683 KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSI64) != 0 ||
1684 (word0 & FO_MQR_WORD0_FMT_TYPE_MSI32) != 0,
1685 ("%s: received non-MSI/MSI-X message in event queue %d "
1686 "(word0 %#llx)", device_get_nameunit(dev), msiq,
1687 (unsigned long long)word0));
1688 msi = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
1689 FO_MQR_WORD0_DATA0_SHFT;
1690 /*
1691 * Sanity check the MSI/MSI-X as long as we use a 1:1 mapping.
1692 */
1693 KASSERT(msi == fmqa->fmqa_msi,
1694 ("%s: received non-matching MSI/MSI-X in event queue %d "
1695 "(%d versus %d)", device_get_nameunit(dev), msiq, msi,
1696 fmqa->fmqa_msi));
1697 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + (msi << 3),
1698 FO_PCI_MSI_CLR_EQWR_N);
1699 if (__predict_false(intr_event_handle(iv->iv_event,
1700 NULL) != 0))
1701 printf("stray MSI/MSI-X in event queue %d\n", msiq);
1702 qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
1703 head = (head + 1) % sc->sc_msiq_size;
1704 qrec = &fmqa->fmqa_base[head];
1705 word0 = qrec->fomqr_word0;
1706 }
1707 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
1708 FO_PCI_EQ_HD_SHFT);
1709 if (__predict_false((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
1710 FO_PCI_EQ_TL_OVERR) != 0)) {
1711 device_printf(dev, "event queue %d overflow\n", msiq);
1712 msiq <<= 3;
1713 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1714 FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
1715 FO_PCI_EQ_CTRL_CLR_COVERR);
1716 }
1717 }
1718
1719 static int
1720 fire_setup_intr(device_t dev, device_t child, struct resource *ires,
1721 int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg,
1722 void **cookiep)
1723 {
1724 struct fire_softc *sc;
1725 struct fire_msiqarg *fmqa;
1726 u_long vec;
1727 int error;
1728 u_int msi, msiq;
1729
1730 sc = device_get_softc(dev);
1731 /*
1732 * XXX this assumes that a device has only one INTx, while in fact
1733 * Cassini+ and Saturn can use all four lines the firmware has
1734 * assigned to them; pci(4) makes the same assumption, though.
1735 */
1736 if (rman_get_rid(ires) != 0) {
1737 msi = rman_get_start(ires);
1738 msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
1739 vec = INTMAP_VEC(sc->sc_ign, sc->sc_msiq_ino_first + msiq);
1740 msiq += sc->sc_msiq_first;
1741 if (intr_vectors[vec].iv_ic != &fire_ic) {
1742 device_printf(dev,
1743 "invalid interrupt controller for vector 0x%lx\n",
1744 vec);
1745 return (EINVAL);
1746 }
1747 /*
1748 * The MD interrupt code needs the vector rather than the MSI.
1749 */
1750 rman_set_start(ires, vec);
1751 rman_set_end(ires, vec);
1752 error = bus_generic_setup_intr(dev, child, ires, flags, filt,
1753 intr, arg, cookiep);
1754 rman_set_start(ires, msi);
1755 rman_set_end(ires, msi);
1756 if (error != 0)
1757 return (error);
1758 fmqa = intr_vectors[vec].iv_icarg;
1759 /*
1760 * XXX inject our event queue handler.
1761 */
1762 if (filt != NULL) {
1763 intr_vectors[vec].iv_func = fire_msiq_filter;
1764 intr_vectors[vec].iv_ic = &fire_msiqc_filter;
1765 /*
1766 * Ensure the event queue interrupt is cleared, it
1767 * might have triggered before. Given we supply NULL
1768 * as ic_clear, inthand_add() won't do this for us.
1769 */
1770 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_fica.fica_clr,
1771 INTCLR_IDLE);
1772 } else
1773 intr_vectors[vec].iv_func = fire_msiq_handler;
1774 /* Record the MSI/MSI-X as long as we use a 1:1 mapping. */
1775 fmqa->fmqa_msi = msi;
1776 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (msiq << 3),
1777 FO_PCI_EQ_CTRL_SET_EN);
1778 msi <<= 3;
1779 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1780 (FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
1781 ~FO_PCI_MSI_MAP_EQNUM_MASK) |
1782 ((msiq << FO_PCI_MSI_MAP_EQNUM_SHFT) &
1783 FO_PCI_MSI_MAP_EQNUM_MASK));
1784 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + msi,
1785 FO_PCI_MSI_CLR_EQWR_N);
1786 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1787 FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) |
1788 FO_PCI_MSI_MAP_V);
1789 return (error);
1790 }
1791
1792 /*
1793 * Make sure the vector is fully specified and we registered
1794 * our interrupt controller for it.
1795 */
1796 vec = rman_get_start(ires);
1797 if (INTIGN(vec) != sc->sc_ign) {
1798 device_printf(dev, "invalid interrupt vector 0x%lx\n", vec);
1799 return (EINVAL);
1800 }
1801 if (intr_vectors[vec].iv_ic != &fire_ic) {
1802 device_printf(dev,
1803 "invalid interrupt controller for vector 0x%lx\n", vec);
1804 return (EINVAL);
1805 }
1806 return (bus_generic_setup_intr(dev, child, ires, flags, filt, intr,
1807 arg, cookiep));
1808 }
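
/*
 * A minimal sketch of the resource rewriting idiom used in the MSI path
 * above: the MD interrupt code keys on the vector, so the resource is
 * temporarily retargeted at the vector for the generic setup call and
 * restored to the MSI number afterwards.
 */
#if 0
	rman_set_start(ires, vec);	/* hand the MD code the vector */
	rman_set_end(ires, vec);
	error = bus_generic_setup_intr(dev, child, ires, flags, filt,
	    intr, arg, cookiep);
	rman_set_start(ires, msi);	/* restore the MSI number */
	rman_set_end(ires, msi);
#endif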
1809
1810 static int
1811 fire_teardown_intr(device_t dev, device_t child, struct resource *ires,
1812 void *cookie)
1813 {
1814 struct fire_softc *sc;
1815 u_long vec;
1816 int error;
1817 u_int msi, msiq;
1818
1819 sc = device_get_softc(dev);
1820 if (rman_get_rid(ires) != 0) {
1821 msi = rman_get_start(ires);
1822 msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
1823 vec = INTMAP_VEC(sc->sc_ign, msiq + sc->sc_msiq_ino_first);
1824 msiq += sc->sc_msiq_first;
1825 msi <<= 3;
1826 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1827 FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
1828 ~FO_PCI_MSI_MAP_V);
1829 msiq <<= 3;
1830 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1831 FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
1832 FO_PCI_EQ_CTRL_CLR_DIS);
1833 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_TL_BASE + msiq,
1834 (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
1835 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_HD_BASE + msiq,
1836 (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
1837 intr_vectors[vec].iv_ic = &fire_ic;
1838 /*
1839 * The MD interrupt code needs the vector rather than the MSI.
1840 */
1841 rman_set_start(ires, vec);
1842 rman_set_end(ires, vec);
1843 error = bus_generic_teardown_intr(dev, child, ires, cookie);
1844 msi >>= 3;
1845 rman_set_start(ires, msi);
1846 rman_set_end(ires, msi);
1847 return (error);
1848 }
1849 return (bus_generic_teardown_intr(dev, child, ires, cookie));
1850 }
1851
1852 static struct resource *
1853 fire_alloc_resource(device_t bus, device_t child, int type, int *rid,
1854 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
1855 {
1856 struct fire_softc *sc;
1857
1858 if (type == SYS_RES_IRQ && *rid == 0) {
1859 sc = device_get_softc(bus);
1860 start = end = INTMAP_VEC(sc->sc_ign, end);
1861 }
1862 return (ofw_pci_alloc_resource(bus, child, type, rid, start, end,
1863 count, flags));
1864 }
1865
1866 static u_int
1867 fire_get_timecount(struct timecounter *tc)
1868 {
1869 struct fire_softc *sc;
1870
1871 sc = tc->tc_priv;
1872 return (FIRE_CTRL_READ_8(sc, FO_XBC_PRF_CNT0) & TC_COUNTER_MAX_MASK);
1873 }