sys/sparc64/pci/fire.c
1 /*-
2 * Copyright (c) 1999, 2000 Matthew R. Green
3 * Copyright (c) 2001 - 2003 by Thomas Moestl <tmm@FreeBSD.org>
4 * Copyright (c) 2009 by Marius Strobl <marius@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * from: NetBSD: psycho.c,v 1.39 2001/10/07 20:30:41 eeh Exp
31 * from: FreeBSD: psycho.c 183152 2008-09-18 19:45:22Z marius
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 /*
38 * Driver for `Fire' JBus to PCI Express and `Oberon' Uranus to PCI Express
39 * bridges
40 */
41
42 #include "opt_fire.h"
43 #include "opt_ofw_pci.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/bus.h>
48 #include <sys/interrupt.h>
49 #include <sys/kernel.h>
50 #include <sys/lock.h>
51 #include <sys/malloc.h>
52 #include <sys/module.h>
53 #include <sys/mutex.h>
54 #include <sys/pciio.h>
55 #include <sys/pcpu.h>
56 #include <sys/rman.h>
57 #include <sys/smp.h>
58 #include <sys/sysctl.h>
59 #include <sys/timetc.h>
60
61 #include <dev/ofw/ofw_bus.h>
62 #include <dev/ofw/ofw_pci.h>
63 #include <dev/ofw/openfirm.h>
64
65 #include <vm/vm.h>
66 #include <vm/pmap.h>
67
68 #include <machine/bus.h>
69 #include <machine/bus_common.h>
70 #include <machine/bus_private.h>
71 #include <machine/fsr.h>
72 #include <machine/iommureg.h>
73 #include <machine/iommuvar.h>
74 #include <machine/pmap.h>
75 #include <machine/resource.h>
76
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcivar.h>
79
80 #include <sparc64/pci/ofw_pci.h>
81 #include <sparc64/pci/firereg.h>
82 #include <sparc64/pci/firevar.h>
83
84 #include "pcib_if.h"
85
86 struct fire_msiqarg;
87
88 static const struct fire_desc *fire_get_desc(device_t dev);
89 static void fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
90 bus_dmasync_op_t op);
91 static int fire_get_intrmap(struct fire_softc *sc, u_int ino,
92 bus_addr_t *intrmapptr, bus_addr_t *intrclrptr);
93 static void fire_intr_assign(void *arg);
94 static void fire_intr_clear(void *arg);
95 static void fire_intr_disable(void *arg);
96 static void fire_intr_enable(void *arg);
97 static int fire_intr_register(struct fire_softc *sc, u_int ino);
98 static inline void fire_msiq_common(struct intr_vector *iv,
99 struct fire_msiqarg *fmqa);
100 static void fire_msiq_filter(void *cookie);
101 static void fire_msiq_handler(void *cookie);
102 static void fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
103 driver_filter_t handler, void *arg);
104 static timecounter_get_t fire_get_timecount;
105
106 /* Interrupt handlers */
107 static driver_filter_t fire_dmc_pec;
108 static driver_filter_t fire_pcie;
109 static driver_filter_t fire_xcb;
110
111 /*
112 * Methods
113 */
114 static bus_activate_resource_t fire_activate_resource;
115 static bus_adjust_resource_t fire_adjust_resource;
116 static pcib_alloc_msi_t fire_alloc_msi;
117 static pcib_alloc_msix_t fire_alloc_msix;
118 static bus_alloc_resource_t fire_alloc_resource;
119 static device_attach_t fire_attach;
120 static bus_get_dma_tag_t fire_get_dma_tag;
121 static ofw_bus_get_node_t fire_get_node;
122 static pcib_map_msi_t fire_map_msi;
123 static pcib_maxslots_t fire_maxslots;
124 static device_probe_t fire_probe;
125 static pcib_read_config_t fire_read_config;
126 static bus_read_ivar_t fire_read_ivar;
127 static pcib_release_msi_t fire_release_msi;
128 static pcib_release_msix_t fire_release_msix;
129 static pcib_route_interrupt_t fire_route_interrupt;
130 static bus_setup_intr_t fire_setup_intr;
131 static bus_teardown_intr_t fire_teardown_intr;
132 static pcib_write_config_t fire_write_config;
133
134 static device_method_t fire_methods[] = {
135 /* Device interface */
136 DEVMETHOD(device_probe, fire_probe),
137 DEVMETHOD(device_attach, fire_attach),
138 DEVMETHOD(device_shutdown, bus_generic_shutdown),
139 DEVMETHOD(device_suspend, bus_generic_suspend),
140 DEVMETHOD(device_resume, bus_generic_resume),
141
142 /* Bus interface */
143 DEVMETHOD(bus_read_ivar, fire_read_ivar),
144 DEVMETHOD(bus_setup_intr, fire_setup_intr),
145 DEVMETHOD(bus_teardown_intr, fire_teardown_intr),
146 DEVMETHOD(bus_alloc_resource, fire_alloc_resource),
147 DEVMETHOD(bus_activate_resource, fire_activate_resource),
148 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
149 DEVMETHOD(bus_adjust_resource, fire_adjust_resource),
150 DEVMETHOD(bus_release_resource, bus_generic_release_resource),
151 DEVMETHOD(bus_get_dma_tag, fire_get_dma_tag),
152
153 /* pcib interface */
154 DEVMETHOD(pcib_maxslots, fire_maxslots),
155 DEVMETHOD(pcib_read_config, fire_read_config),
156 DEVMETHOD(pcib_write_config, fire_write_config),
157 DEVMETHOD(pcib_route_interrupt, fire_route_interrupt),
158 DEVMETHOD(pcib_alloc_msi, fire_alloc_msi),
159 DEVMETHOD(pcib_release_msi, fire_release_msi),
160 DEVMETHOD(pcib_alloc_msix, fire_alloc_msix),
161 DEVMETHOD(pcib_release_msix, fire_release_msix),
162 DEVMETHOD(pcib_map_msi, fire_map_msi),
163
164 /* ofw_bus interface */
165 DEVMETHOD(ofw_bus_get_node, fire_get_node),
166
167 DEVMETHOD_END
168 };
169
170 static devclass_t fire_devclass;
171
172 DEFINE_CLASS_0(pcib, fire_driver, fire_methods, sizeof(struct fire_softc));
173 EARLY_DRIVER_MODULE(fire, nexus, fire_driver, fire_devclass, 0, 0,
174 BUS_PASS_BUS);
175 MODULE_DEPEND(fire, nexus, 1, 1, 1);
176
177 static const struct intr_controller fire_ic = {
178 fire_intr_enable,
179 fire_intr_disable,
180 fire_intr_assign,
181 fire_intr_clear
182 };
183
184 struct fire_icarg {
185 struct fire_softc *fica_sc;
186 bus_addr_t fica_map;
187 bus_addr_t fica_clr;
188 };
189
190 static const struct intr_controller fire_msiqc_filter = {
191 fire_intr_enable,
192 fire_intr_disable,
193 fire_intr_assign,
194 NULL
195 };
196
197 struct fire_msiqarg {
198 struct fire_icarg fmqa_fica;
199 struct mtx fmqa_mtx;
200 struct fo_msiq_record *fmqa_base;
201 uint64_t fmqa_head;
202 uint64_t fmqa_tail;
203 uint32_t fmqa_msiq;
204 uint32_t fmqa_msi;
205 };
206
207 #define FIRE_PERF_CNT_QLTY 100
208
209 #define FIRE_SPC_BARRIER(spc, sc, offs, len, flags) \
210 bus_barrier((sc)->sc_mem_res[(spc)], (offs), (len), (flags))
211 #define FIRE_SPC_READ_8(spc, sc, offs) \
212 bus_read_8((sc)->sc_mem_res[(spc)], (offs))
213 #define FIRE_SPC_WRITE_8(spc, sc, offs, v) \
214 bus_write_8((sc)->sc_mem_res[(spc)], (offs), (v))
215
216 #ifndef FIRE_DEBUG
217 #define FIRE_SPC_SET(spc, sc, offs, reg, v) \
218 FIRE_SPC_WRITE_8((spc), (sc), (offs), (v))
219 #else
220 #define FIRE_SPC_SET(spc, sc, offs, reg, v) do { \
221 device_printf((sc)->sc_dev, reg " 0x%016llx -> 0x%016llx\n", \
222 (unsigned long long)FIRE_SPC_READ_8((spc), (sc), (offs)), \
223 (unsigned long long)(v)); \
224 FIRE_SPC_WRITE_8((spc), (sc), (offs), (v)); \
225 } while (0)
226 #endif
227
228 #define FIRE_PCI_BARRIER(sc, offs, len, flags) \
229 FIRE_SPC_BARRIER(FIRE_PCI, (sc), (offs), len, flags)
230 #define FIRE_PCI_READ_8(sc, offs) \
231 FIRE_SPC_READ_8(FIRE_PCI, (sc), (offs))
232 #define FIRE_PCI_WRITE_8(sc, offs, v) \
233 FIRE_SPC_WRITE_8(FIRE_PCI, (sc), (offs), (v))
234 #define FIRE_CTRL_BARRIER(sc, offs, len, flags) \
235 FIRE_SPC_BARRIER(FIRE_CTRL, (sc), (offs), len, flags)
236 #define FIRE_CTRL_READ_8(sc, offs) \
237 FIRE_SPC_READ_8(FIRE_CTRL, (sc), (offs))
238 #define FIRE_CTRL_WRITE_8(sc, offs, v) \
239 FIRE_SPC_WRITE_8(FIRE_CTRL, (sc), (offs), (v))
240
241 #define FIRE_PCI_SET(sc, offs, v) \
242 FIRE_SPC_SET(FIRE_PCI, (sc), (offs), # offs, (v))
243 #define FIRE_CTRL_SET(sc, offs, v) \
244 FIRE_SPC_SET(FIRE_CTRL, (sc), (offs), # offs, (v))
245
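/*
 * Note that FIRE_PCI_SET() and FIRE_CTRL_SET() stringify the register
 * offset macro via `# offs', so with FIRE_DEBUG defined e.g.
 * FIRE_CTRL_SET(sc, FO_XBC_INT_EN, ~0ULL) traces the write as
 * "FO_XBC_INT_EN 0x<old value> -> 0x<new value>".
 */
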
246 struct fire_desc {
247 const char *fd_string;
248 int fd_mode;
249 const char *fd_name;
250 };
251
252 static const struct fire_desc fire_compats[] = {
253 { "pciex108e,80f0", FIRE_MODE_FIRE, "Fire" },
254 #if 0
255 { "pciex108e,80f8", FIRE_MODE_OBERON, "Oberon" },
256 #endif
257 { NULL, 0, NULL }
258 };
259
260 static const struct fire_desc *
261 fire_get_desc(device_t dev)
262 {
263 const struct fire_desc *desc;
264 const char *compat;
265
266 compat = ofw_bus_get_compat(dev);
267 if (compat == NULL)
268 return (NULL);
269 for (desc = fire_compats; desc->fd_string != NULL; desc++)
270 if (strcmp(desc->fd_string, compat) == 0)
271 return (desc);
272 return (NULL);
273 }
274
275 static int
276 fire_probe(device_t dev)
277 {
278 const char *dtype;
279
280 dtype = ofw_bus_get_type(dev);
281 if (dtype != NULL && strcmp(dtype, OFW_TYPE_PCIE) == 0 &&
282 fire_get_desc(dev) != NULL) {
283 device_set_desc(dev, "Sun Host-PCIe bridge");
284 return (BUS_PROBE_GENERIC);
285 }
286 return (ENXIO);
287 }
288
289 static int
290 fire_attach(device_t dev)
291 {
292 struct fire_softc *sc;
293 const struct fire_desc *desc;
294 struct ofw_pci_msi_ranges msi_ranges;
295 struct ofw_pci_msi_addr_ranges msi_addr_ranges;
296 struct ofw_pci_msi_eq_to_devino msi_eq_to_devino;
297 struct fire_msiqarg *fmqa;
298 struct timecounter *tc;
299 struct ofw_pci_ranges *range;
300 uint64_t ino_bitmap, val;
301 phandle_t node;
302 uint32_t prop, prop_array[2];
303 int i, j, mode;
304 u_int lw;
305 uint16_t mps;
306
307 sc = device_get_softc(dev);
308 node = ofw_bus_get_node(dev);
309 desc = fire_get_desc(dev);
310 mode = desc->fd_mode;
311
312 sc->sc_dev = dev;
313 sc->sc_node = node;
314 sc->sc_mode = mode;
315 sc->sc_flags = 0;
316
317 mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF);
318 mtx_init(&sc->sc_pcib_mtx, "pcib_mtx", NULL, MTX_SPIN);
319
320 /*
321 * Fire and Oberon have two register banks:
322 * (0) per-PBM PCI Express configuration and status registers
323 * (1) (shared) Fire/Oberon controller configuration and status
324 * registers
325 */
326 for (i = 0; i < FIRE_NREG; i++) {
327 j = i;
328 sc->sc_mem_res[i] = bus_alloc_resource_any(dev,
329 SYS_RES_MEMORY, &j, RF_ACTIVE);
330 if (sc->sc_mem_res[i] == NULL)
331 panic("%s: could not allocate register bank %d",
332 __func__, i);
333 }
334
335 if (OF_getprop(node, "portid", &sc->sc_ign, sizeof(sc->sc_ign)) == -1)
336 panic("%s: could not determine IGN", __func__);
337 if (OF_getprop(node, "module-revision#", &prop, sizeof(prop)) == -1)
338 panic("%s: could not determine module-revision", __func__);
339
340 device_printf(dev, "%s, module-revision %d, IGN %#x\n",
341 desc->fd_name, prop, sc->sc_ign);
342
343 /*
344 * Hunt through all the interrupt mapping regs and register
345 * the interrupt controller for our interrupt vectors. We do
346 * this early in order to be able to catch stray interrupts.
347 */
348 i = OF_getprop(node, "ino-bitmap", (void *)prop_array,
349 sizeof(prop_array));
350 if (i == -1)
351 panic("%s: could not get ino-bitmap", __func__);
352 ino_bitmap = ((uint64_t)prop_array[1] << 32) | prop_array[0];
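/*
 * The two 32-bit cells of the ino-bitmap property are combined
 * least-significant word first; a set bit N apparently marks INO N
 * as implemented on this bridge.
 */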
353 for (i = 0; i <= FO_MAX_INO; i++) {
354 if ((ino_bitmap & (1ULL << i)) == 0)
355 continue;
356 j = fire_intr_register(sc, i);
357 if (j != 0)
358 device_printf(dev, "could not register interrupt "
359 "controller for INO %d (%d)\n", i, j);
360 }
361
362 /* JBC/UBC module initialization */
363 FIRE_CTRL_SET(sc, FO_XBC_ERR_LOG_EN, ~0ULL);
364 FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
365 /* not enabled by OpenSolaris */
366 FIRE_CTRL_SET(sc, FO_XBC_INT_EN, ~0ULL);
367 if (sc->sc_mode == FIRE_MODE_FIRE) {
368 FIRE_CTRL_SET(sc, FIRE_JBUS_PAR_CTRL,
369 FIRE_JBUS_PAR_CTRL_P_EN);
370 FIRE_CTRL_SET(sc, FIRE_JBC_FATAL_RST_EN,
371 ((1ULL << FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_SHFT) &
372 FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_MASK) |
373 FIRE_JBC_FATAL_RST_EN_MB_PEA_P_INT |
374 FIRE_JBC_FATAL_RST_EN_CPE_P_INT |
375 FIRE_JBC_FATAL_RST_EN_APE_P_INT |
376 FIRE_JBC_FATAL_RST_EN_PIO_CPE_INT |
377 FIRE_JBC_FATAL_RST_EN_JTCEEW_P_INT |
378 FIRE_JBC_FATAL_RST_EN_JTCEEI_P_INT |
379 FIRE_JBC_FATAL_RST_EN_JTCEER_P_INT);
380 FIRE_CTRL_SET(sc, FIRE_JBC_CORE_BLOCK_INT_EN, ~0ULL);
381 }
382
383 /* TLU initialization */
384 FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_STAT_CLR,
385 FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
386 /* not enabled by OpenSolaris */
387 FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_INT_EN,
388 FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
389 FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_STAT_CLR,
390 FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
391 /* not enabled by OpenSolaris */
392 FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_INT_EN,
393 FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
394 FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_STAT_CLR,
395 FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
396 /* not enabled by OpenSolaris */
397 FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_INT_EN,
398 FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
399 val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) |
400 ((FO_PCI_TLU_CTRL_L0S_TIM_DFLT << FO_PCI_TLU_CTRL_L0S_TIM_SHFT) &
401 FO_PCI_TLU_CTRL_L0S_TIM_MASK) |
402 ((FO_PCI_TLU_CTRL_CFG_DFLT << FO_PCI_TLU_CTRL_CFG_SHFT) &
403 FO_PCI_TLU_CTRL_CFG_MASK);
404 if (sc->sc_mode == FIRE_MODE_OBERON)
405 val &= ~FO_PCI_TLU_CTRL_NWPR_EN;
406 val |= FO_PCI_TLU_CTRL_CFG_REMAIN_DETECT_QUIET;
407 FIRE_PCI_SET(sc, FO_PCI_TLU_CTRL, val);
408 FIRE_PCI_SET(sc, FO_PCI_TLU_DEV_CTRL, 0);
409 FIRE_PCI_SET(sc, FO_PCI_TLU_LNK_CTRL, FO_PCI_TLU_LNK_CTRL_CLK);
410
411 /* DLU/LPU initialization */
412 if (sc->sc_mode == FIRE_MODE_OBERON)
413 FIRE_PCI_SET(sc, FO_PCI_LPU_INT_MASK, 0);
414 else
415 FIRE_PCI_SET(sc, FO_PCI_LPU_RST, 0);
416 FIRE_PCI_SET(sc, FO_PCI_LPU_LNK_LYR_CFG,
417 FO_PCI_LPU_LNK_LYR_CFG_VC0_EN);
418 FIRE_PCI_SET(sc, FO_PCI_LPU_FLW_CTRL_UPDT_CTRL,
419 FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_NP_EN |
420 FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_P_EN);
421 if (sc->sc_mode == FIRE_MODE_OBERON)
422 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
423 (OBERON_PCI_LPU_TXLNK_RPLY_TMR_THRS_DFLT <<
424 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
425 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
426 else {
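/*
 * Map the negotiated link width (x1/x4/x8/x16 lanes) to an index
 * into the replay/latency timer threshold tables; unknown widths
 * conservatively fall back to the x1 entry.
 */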
427 switch ((FIRE_PCI_READ_8(sc, FO_PCI_TLU_LNK_STAT) &
428 FO_PCI_TLU_LNK_STAT_WDTH_MASK) >>
429 FO_PCI_TLU_LNK_STAT_WDTH_SHFT) {
430 case 1:
431 lw = 0;
432 break;
433 case 4:
434 lw = 1;
435 break;
436 case 8:
437 lw = 2;
438 break;
439 case 16:
440 lw = 3;
441 break;
442 default:
443 lw = 0;
444 }
445 mps = (FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) &
446 FO_PCI_TLU_CTRL_CFG_MPS_MASK) >>
447 FO_PCI_TLU_CTRL_CFG_MPS_SHFT;
448 i = sizeof(fire_freq_nak_tmr_thrs) /
449 sizeof(*fire_freq_nak_tmr_thrs);
450 if (mps >= i)
451 mps = i - 1;
452 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS,
453 (fire_freq_nak_tmr_thrs[mps][lw] <<
454 FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_SHFT) &
455 FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_MASK);
456 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
457 (fire_rply_tmr_thrs[mps][lw] <<
458 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
459 FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
460 FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RTR_FIFO_PTR,
461 ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_DFLT <<
462 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_SHFT) &
463 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_MASK) |
464 ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_DFLT <<
465 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_SHFT) &
466 FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_MASK));
467 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG2,
468 (FO_PCI_LPU_LTSSM_CFG2_12_TO_DFLT <<
469 FO_PCI_LPU_LTSSM_CFG2_12_TO_SHFT) &
470 FO_PCI_LPU_LTSSM_CFG2_12_TO_MASK);
471 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG3,
472 (FO_PCI_LPU_LTSSM_CFG3_2_TO_DFLT <<
473 FO_PCI_LPU_LTSSM_CFG3_2_TO_SHFT) &
474 FO_PCI_LPU_LTSSM_CFG3_2_TO_MASK);
475 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG4,
476 ((FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_DFLT <<
477 FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_SHFT) &
478 FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_MASK) |
479 ((FO_PCI_LPU_LTSSM_CFG4_N_FTS_DFLT <<
480 FO_PCI_LPU_LTSSM_CFG4_N_FTS_SHFT) &
481 FO_PCI_LPU_LTSSM_CFG4_N_FTS_MASK));
482 FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG5, 0);
483 }
484
485 /* ILU initialization */
486 FIRE_PCI_SET(sc, FO_PCI_ILU_ERR_STAT_CLR, ~0ULL);
487 /* not enabled by OpenSolaris */
488 FIRE_PCI_SET(sc, FO_PCI_ILU_INT_EN, ~0ULL);
489
490 /* IMU initialization */
491 FIRE_PCI_SET(sc, FO_PCI_IMU_ERR_STAT_CLR, ~0ULL);
492 FIRE_PCI_SET(sc, FO_PCI_IMU_INT_EN,
493 FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_EN) &
494 ~(FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_S |
495 FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_S |
496 FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_S |
497 FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
498 FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
499 FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P));
500
501 /* MMU initialization */
502 FIRE_PCI_SET(sc, FO_PCI_MMU_ERR_STAT_CLR,
503 FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
504 /* not enabled by OpenSolaris */
505 FIRE_PCI_SET(sc, FO_PCI_MMU_INT_EN,
506 FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
507
508 /* DMC initialization */
509 FIRE_PCI_SET(sc, FO_PCI_DMC_CORE_BLOCK_INT_EN, ~0ULL);
510 FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTA, 0);
511 FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTB, 0);
512
513 /* PEC initialization */
514 FIRE_PCI_SET(sc, FO_PCI_PEC_CORE_BLOCK_INT_EN, ~0ULL);
515
516 /* Establish handlers for interesting interrupts. */
517 if ((ino_bitmap & (1ULL << FO_DMC_PEC_INO)) != 0)
518 fire_set_intr(sc, 1, FO_DMC_PEC_INO, fire_dmc_pec, sc);
519 if ((ino_bitmap & (1ULL << FO_XCB_INO)) != 0)
520 fire_set_intr(sc, 0, FO_XCB_INO, fire_xcb, sc);
521
522 /* MSI/MSI-X support */
523 if (OF_getprop(node, "#msi", &sc->sc_msi_count,
524 sizeof(sc->sc_msi_count)) == -1)
525 panic("%s: could not determine MSI count", __func__);
526 if (OF_getprop(node, "msi-ranges", &msi_ranges,
527 sizeof(msi_ranges)) == -1)
528 sc->sc_msi_first = 0;
529 else
530 sc->sc_msi_first = msi_ranges.first;
531 if (OF_getprop(node, "msi-data-mask", &sc->sc_msi_data_mask,
532 sizeof(sc->sc_msi_data_mask)) == -1)
533 panic("%s: could not determine MSI data mask", __func__);
534 if (OF_getprop(node, "msix-data-width", &sc->sc_msix_data_width,
535 sizeof(sc->sc_msix_data_width)) > 0)
536 sc->sc_flags |= FIRE_MSIX;
537 if (OF_getprop(node, "msi-address-ranges", &msi_addr_ranges,
538 sizeof(msi_addr_ranges)) == -1)
539 panic("%s: could not determine MSI address ranges", __func__);
540 sc->sc_msi_addr32 = OFW_PCI_MSI_ADDR_RANGE_32(&msi_addr_ranges);
541 sc->sc_msi_addr64 = OFW_PCI_MSI_ADDR_RANGE_64(&msi_addr_ranges);
542 if (OF_getprop(node, "#msi-eqs", &sc->sc_msiq_count,
543 sizeof(sc->sc_msiq_count)) == -1)
544 panic("%s: could not determine MSI event queue count",
545 __func__);
546 if (OF_getprop(node, "msi-eq-size", &sc->sc_msiq_size,
547 sizeof(sc->sc_msiq_size)) == -1)
548 panic("%s: could not determine MSI event queue size",
549 __func__);
550 if (OF_getprop(node, "msi-eq-to-devino", &msi_eq_to_devino,
551 sizeof(msi_eq_to_devino)) == -1 &&
552 OF_getprop(node, "msi-eq-devino", &msi_eq_to_devino,
553 sizeof(msi_eq_to_devino)) == -1) {
554 sc->sc_msiq_first = 0;
555 sc->sc_msiq_ino_first = FO_EQ_FIRST_INO;
556 } else {
557 sc->sc_msiq_first = msi_eq_to_devino.eq_first;
558 sc->sc_msiq_ino_first = msi_eq_to_devino.devino_first;
559 }
560 if (sc->sc_msiq_ino_first < FO_EQ_FIRST_INO ||
561 sc->sc_msiq_ino_first + sc->sc_msiq_count - 1 > FO_EQ_LAST_INO)
562 panic("%s: event queues exceed INO range", __func__);
563 sc->sc_msi_bitmap = malloc(roundup2(sc->sc_msi_count, NBBY) / NBBY,
564 M_DEVBUF, M_NOWAIT | M_ZERO);
565 if (sc->sc_msi_bitmap == NULL)
566 panic("%s: could not malloc MSI bitmap", __func__);
567 sc->sc_msi_msiq_table = malloc(sc->sc_msi_count *
568 sizeof(*sc->sc_msi_msiq_table), M_DEVBUF, M_NOWAIT | M_ZERO);
569 if (sc->sc_msi_msiq_table == NULL)
570 panic("%s: could not malloc MSI-MSI event queue table",
571 __func__);
572 sc->sc_msiq_bitmap = malloc(roundup2(sc->sc_msiq_count, NBBY) / NBBY,
573 M_DEVBUF, M_NOWAIT | M_ZERO);
574 if (sc->sc_msiq_bitmap == NULL)
575 panic("%s: could not malloc MSI event queue bitmap", __func__);
576 j = FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * sc->sc_msiq_count;
577 sc->sc_msiq = contigmalloc(j, M_DEVBUF, M_NOWAIT, 0, ~0UL,
578 FO_EQ_ALIGNMENT, 0);
579 if (sc->sc_msiq == NULL)
580 panic("%s: could not contigmalloc MSI event queue", __func__);
581 memset(sc->sc_msiq, 0, j);
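/*
 * The event queue base is programmed with the raw physical address
 * in bypass mode (FO_PCI_EQ_BASE_ADDR_BYPASS), presumably so that
 * event queue DMA does not itself pass through the IOMMU.
 */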
582 FIRE_PCI_SET(sc, FO_PCI_EQ_BASE_ADDR, FO_PCI_EQ_BASE_ADDR_BYPASS |
583 (pmap_kextract((vm_offset_t)sc->sc_msiq) &
584 FO_PCI_EQ_BASE_ADDR_MASK));
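/*
 * Invalidate all MSI mappings for now; each mapping register is
 * 8 bytes wide (hence the shift by 3) and is disabled by clearing
 * what presumably is its V(alid) bit.
 */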
585 for (i = 0; i < sc->sc_msi_count; i++) {
586 j = (i + sc->sc_msi_first) << 3;
587 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + j,
588 FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + j) &
589 ~FO_PCI_MSI_MAP_V);
590 }
591 for (i = 0; i < sc->sc_msiq_count; i++) {
592 j = i + sc->sc_msiq_ino_first;
593 if ((ino_bitmap & (1ULL << j)) == 0) {
594 mtx_lock(&sc->sc_msi_mtx);
595 setbit(sc->sc_msiq_bitmap, i);
596 mtx_unlock(&sc->sc_msi_mtx);
597 }
598 fmqa = intr_vectors[INTMAP_VEC(sc->sc_ign, j)].iv_icarg;
599 mtx_init(&fmqa->fmqa_mtx, "msiq_mtx", NULL, MTX_SPIN);
600 fmqa->fmqa_base =
601 (struct fo_msiq_record *)((caddr_t)sc->sc_msiq +
602 (FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * i));
603 j = i + sc->sc_msiq_first;
604 fmqa->fmqa_msiq = j;
605 j <<= 3;
606 fmqa->fmqa_head = FO_PCI_EQ_HD_BASE + j;
607 fmqa->fmqa_tail = FO_PCI_EQ_TL_BASE + j;
608 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + j,
609 FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
610 FO_PCI_EQ_CTRL_CLR_DIS);
611 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_tail,
612 (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
613 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head,
614 (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
615 }
616 FIRE_PCI_SET(sc, FO_PCI_MSI_32_BIT_ADDR, sc->sc_msi_addr32 &
617 FO_PCI_MSI_32_BIT_ADDR_MASK);
618 FIRE_PCI_SET(sc, FO_PCI_MSI_64_BIT_ADDR, sc->sc_msi_addr64 &
619 FO_PCI_MSI_64_BIT_ADDR_MASK);
620
621 /*
622 * Establish a handler for interesting PCIe messages and disable
623 * uninteresting ones.
624 */
625 mtx_lock(&sc->sc_msi_mtx);
626 for (i = 0; i < sc->sc_msiq_count; i++) {
627 if (isclr(sc->sc_msiq_bitmap, i) != 0) {
628 j = i;
629 break;
630 }
631 }
632 if (i == sc->sc_msiq_count) {
633 mtx_unlock(&sc->sc_msi_mtx);
634 panic("%s: no spare event queue for PCIe messages", __func__);
635 }
636 setbit(sc->sc_msiq_bitmap, j);
637 mtx_unlock(&sc->sc_msi_mtx);
638 i = INTMAP_VEC(sc->sc_ign, j + sc->sc_msiq_ino_first);
639 if (bus_set_resource(dev, SYS_RES_IRQ, 2, i, 1) != 0)
640 panic("%s: failed to add interrupt for PCIe messages",
641 __func__);
642 fire_set_intr(sc, 2, INTINO(i), fire_pcie, intr_vectors[i].iv_icarg);
643 j += sc->sc_msiq_first;
644 /*
645 * "Please note that setting the EQNUM field to a value larger than
646 * 35 will yield unpredictable results."
647 */
648 if (j > 35)
649 panic("%s: invalid queue for PCIe messages (%d)",
650 __func__, j);
651 FIRE_PCI_SET(sc, FO_PCI_ERR_COR, FO_PCI_ERR_PME_V |
652 ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
653 FIRE_PCI_SET(sc, FO_PCI_ERR_NONFATAL, FO_PCI_ERR_PME_V |
654 ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
655 FIRE_PCI_SET(sc, FO_PCI_ERR_FATAL, FO_PCI_ERR_PME_V |
656 ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
657 FIRE_PCI_SET(sc, FO_PCI_PM_PME, 0);
658 FIRE_PCI_SET(sc, FO_PCI_PME_TO_ACK, 0);
659 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (j << 3),
660 FO_PCI_EQ_CTRL_SET_EN);
661
662 #define TC_COUNTER_MAX_MASK 0xffffffff
663
664 /*
665 * Set up JBC/UBC performance counter 0 in bus cycle counting
666 * mode as timecounter.
667 */
668 if (device_get_unit(dev) == 0) {
669 FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT0, 0);
670 FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT1, 0);
671 FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT_SEL,
672 (FO_XBC_PRF_CNT_NONE << FO_XBC_PRF_CNT_CNT1_SHFT) |
673 (FO_XBC_PRF_CNT_XB_CLK << FO_XBC_PRF_CNT_CNT0_SHFT));
674 tc = malloc(sizeof(*tc), M_DEVBUF, M_NOWAIT | M_ZERO);
675 if (tc == NULL)
676 panic("%s: could not malloc timecounter", __func__);
677 tc->tc_get_timecount = fire_get_timecount;
678 tc->tc_counter_mask = TC_COUNTER_MAX_MASK;
679 if (OF_getprop(OF_peer(0), "clock-frequency", &prop,
680 sizeof(prop)) == -1)
681 panic("%s: could not determine clock frequency",
682 __func__);
683 tc->tc_frequency = prop;
684 tc->tc_name = strdup(device_get_nameunit(dev), M_DEVBUF);
685 tc->tc_priv = sc;
686 /*
687 * Due to initial problems with the JBus-driven performance
688 * counters not advancing, which might be firmware-dependent,
689 * ensure that it actually works.
690 */
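/*
 * Two back-to-back reads can only differ if the counter actually
 * advances; a stuck counter thus yields a difference of zero and
 * is advertised with a negative quality.
 */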
691 if (fire_get_timecount(tc) - fire_get_timecount(tc) != 0)
692 tc->tc_quality = FIRE_PERF_CNT_QLTY;
693 else
694 tc->tc_quality = -FIRE_PERF_CNT_QLTY;
695 tc_init(tc);
696 }
697
698 /*
699 * Set up the IOMMU. Both Fire and Oberon have one per PBM, but
700 * neither has a streaming buffer.
701 */
702 memcpy(&sc->sc_dma_methods, &iommu_dma_methods,
703 sizeof(sc->sc_dma_methods));
704 sc->sc_is.is_flags = IOMMU_FIRE | IOMMU_PRESERVE_PROM;
705 if (sc->sc_mode == FIRE_MODE_OBERON) {
706 sc->sc_is.is_flags |= IOMMU_FLUSH_CACHE;
707 sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(OBERON_IOMMU_BITS);
708 } else {
709 sc->sc_dma_methods.dm_dmamap_sync = fire_dmamap_sync;
710 sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(FIRE_IOMMU_BITS);
711 }
712 sc->sc_is.is_sb[0] = sc->sc_is.is_sb[1] = 0;
713 /* Punch in our copies. */
714 sc->sc_is.is_bustag = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]);
715 sc->sc_is.is_bushandle = rman_get_bushandle(sc->sc_mem_res[FIRE_PCI]);
716 sc->sc_is.is_iommu = FO_PCI_MMU;
717 val = FIRE_PCI_READ_8(sc, FO_PCI_MMU + IMR_CTL);
718 iommu_init(device_get_nameunit(sc->sc_dev), &sc->sc_is, 7, -1, 0);
719 #ifdef FIRE_DEBUG
720 device_printf(dev, "FO_PCI_MMU + IMR_CTL 0x%016llx -> 0x%016llx\n",
721 (long long unsigned)val, (long long unsigned)sc->sc_is.is_cr);
722 #endif
723
724 /* Initialize memory and I/O rmans. */
725 sc->sc_pci_io_rman.rm_type = RMAN_ARRAY;
726 sc->sc_pci_io_rman.rm_descr = "Fire PCI I/O Ports";
727 if (rman_init(&sc->sc_pci_io_rman) != 0 ||
728 rman_manage_region(&sc->sc_pci_io_rman, 0, FO_IO_SIZE) != 0)
729 panic("%s: failed to set up I/O rman", __func__);
730 sc->sc_pci_mem_rman.rm_type = RMAN_ARRAY;
731 sc->sc_pci_mem_rman.rm_descr = "Fire PCI Memory";
732 if (rman_init(&sc->sc_pci_mem_rman) != 0 ||
733 rman_manage_region(&sc->sc_pci_mem_rman, 0, FO_MEM_SIZE) != 0)
734 panic("%s: failed to set up memory rman", __func__);
735
736 i = OF_getprop_alloc(node, "ranges", sizeof(*range), (void **)&range);
737 /*
738 * Make sure that the expected ranges are present. The
739 * OFW_PCI_CS_MEM64 one is not currently used though.
740 */
741 if (i != FIRE_NRANGE)
742 panic("%s: unsupported number of ranges", __func__);
743 /*
744 * Find the addresses of the various bus spaces.
745 * There should not be multiple ones of one kind.
746 * The physical start addresses of the ranges are the configuration,
747 * memory and I/O handles.
748 */
749 for (i = 0; i < FIRE_NRANGE; i++) {
750 j = OFW_PCI_RANGE_CS(&range[i]);
751 if (sc->sc_pci_bh[j] != 0)
752 panic("%s: duplicate range for space %d",
753 __func__, j);
754 sc->sc_pci_bh[j] = OFW_PCI_RANGE_PHYS(&range[i]);
755 }
756 free(range, M_OFWPROP);
757
758 /* Allocate our tags. */
759 sc->sc_pci_iot = sparc64_alloc_bus_tag(NULL, rman_get_bustag(
760 sc->sc_mem_res[FIRE_PCI]), PCI_IO_BUS_SPACE, NULL);
761 if (sc->sc_pci_iot == NULL)
762 panic("%s: could not allocate PCI I/O tag", __func__);
763 sc->sc_pci_cfgt = sparc64_alloc_bus_tag(NULL, rman_get_bustag(
764 sc->sc_mem_res[FIRE_PCI]), PCI_CONFIG_BUS_SPACE, NULL);
765 if (sc->sc_pci_cfgt == NULL)
766 panic("%s: could not allocate PCI configuration space tag",
767 __func__);
768 if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0x100000000,
769 sc->sc_is.is_pmaxaddr, ~0, NULL, NULL, sc->sc_is.is_pmaxaddr,
770 0xff, 0xffffffff, 0, NULL, NULL, &sc->sc_pci_dmat) != 0)
771 panic("%s: could not create PCI DMA tag", __func__);
772 /* Customize the tag. */
773 sc->sc_pci_dmat->dt_cookie = &sc->sc_is;
774 sc->sc_pci_dmat->dt_mt = &sc->sc_dma_methods;
775
776 /*
777 * Get the bus range from the firmware.
778 * NB: Neither Fire nor Oberon support PCI bus reenumeration.
779 */
780 i = OF_getprop(node, "bus-range", (void *)prop_array,
781 sizeof(prop_array));
782 if (i == -1)
783 panic("%s: could not get bus-range", __func__);
784 if (i != sizeof(prop_array))
785 panic("%s: broken bus-range (%d)", __func__, i);
786 sc->sc_pci_secbus = prop_array[0];
787 sc->sc_pci_subbus = prop_array[1];
788 if (bootverbose != 0)
789 device_printf(dev, "bus range %u to %u; PCI bus %d\n",
790 sc->sc_pci_secbus, sc->sc_pci_subbus, sc->sc_pci_secbus);
791
792 ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(ofw_pci_intr_t));
793
794 #define FIRE_SYSCTL_ADD_UINT(name, arg, desc) \
795 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), \
796 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, \
797 (name), CTLFLAG_RD, (arg), 0, (desc))
798
799 FIRE_SYSCTL_ADD_UINT("ilu_err", &sc->sc_stats_ilu_err,
800 "ILU unknown errors");
801 FIRE_SYSCTL_ADD_UINT("jbc_ce_async", &sc->sc_stats_jbc_ce_async,
802 "JBC correctable errors");
803 FIRE_SYSCTL_ADD_UINT("jbc_unsol_int", &sc->sc_stats_jbc_unsol_int,
804 "JBC unsolicited interrupt ACK/NACK errors");
805 FIRE_SYSCTL_ADD_UINT("jbc_unsol_rd", &sc->sc_stats_jbc_unsol_rd,
806 "JBC unsolicited read response errors");
807 FIRE_SYSCTL_ADD_UINT("mmu_err", &sc->sc_stats_mmu_err, "MMU errors");
808 FIRE_SYSCTL_ADD_UINT("tlu_ce", &sc->sc_stats_tlu_ce,
809 "DLU/TLU correctable errors");
810 FIRE_SYSCTL_ADD_UINT("tlu_oe_non_fatal",
811 &sc->sc_stats_tlu_oe_non_fatal,
812 "DLU/TLU other event non-fatal errors summary"),
813 FIRE_SYSCTL_ADD_UINT("tlu_oe_rx_err", &sc->sc_stats_tlu_oe_rx_err,
814 "DLU/TLU receive other event errors"),
815 FIRE_SYSCTL_ADD_UINT("tlu_oe_tx_err", &sc->sc_stats_tlu_oe_tx_err,
816 "DLU/TLU transmit other event errors"),
817 FIRE_SYSCTL_ADD_UINT("ubc_dmardue", &sc->sc_stats_ubc_dmardue,
818 "UBC DMARDUE erros");
819
820 #undef FIRE_SYSCTL_ADD_UINT
821
822 device_add_child(dev, "pci", -1);
823 return (bus_generic_attach(dev));
824 }
825
826 static void
827 fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
828 driver_filter_t handler, void *arg)
829 {
830 u_long vec;
831 int rid;
832
833 rid = index;
834 sc->sc_irq_res[index] = bus_alloc_resource_any(sc->sc_dev,
835 SYS_RES_IRQ, &rid, RF_ACTIVE);
836 if (sc->sc_irq_res[index] == NULL ||
837 INTINO(vec = rman_get_start(sc->sc_irq_res[index])) != ino ||
838 INTIGN(vec) != sc->sc_ign ||
839 intr_vectors[vec].iv_ic != &fire_ic ||
840 bus_setup_intr(sc->sc_dev, sc->sc_irq_res[index],
841 INTR_TYPE_MISC | INTR_BRIDGE, handler, NULL, arg,
842 &sc->sc_ihand[index]) != 0)
843 panic("%s: failed to set up interrupt %d", __func__, index);
844 }
845
846 static int
847 fire_intr_register(struct fire_softc *sc, u_int ino)
848 {
849 struct fire_icarg *fica;
850 bus_addr_t intrclr, intrmap;
851 int error;
852
853 if (fire_get_intrmap(sc, ino, &intrmap, &intrclr) == 0)
854 return (ENXIO);
855 fica = malloc((ino >= FO_EQ_FIRST_INO && ino <= FO_EQ_LAST_INO) ?
856 sizeof(struct fire_msiqarg) : sizeof(struct fire_icarg), M_DEVBUF,
857 M_NOWAIT | M_ZERO);
858 if (fica == NULL)
859 return (ENOMEM);
860 fica->fica_sc = sc;
861 fica->fica_map = intrmap;
862 fica->fica_clr = intrclr;
863 error = (intr_controller_register(INTMAP_VEC(sc->sc_ign, ino),
864 &fire_ic, fica));
865 if (error != 0)
866 free(fica, M_DEVBUF);
867 return (error);
868 }
869
870 static int
871 fire_get_intrmap(struct fire_softc *sc, u_int ino, bus_addr_t *intrmapptr,
872 bus_addr_t *intrclrptr)
873 {
874
875 if (ino > FO_MAX_INO) {
876 device_printf(sc->sc_dev, "out of range INO %d requested\n",
877 ino);
878 return (0);
879 }
880
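/*
 * The interrupt map and clear registers form arrays of 8-byte
 * entries indexed by INO, hence the conversion of the INO into a
 * byte offset below.
 */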
881 ino <<= 3;
882 if (intrmapptr != NULL)
883 *intrmapptr = FO_PCI_INT_MAP_BASE + ino;
884 if (intrclrptr != NULL)
885 *intrclrptr = FO_PCI_INT_CLR_BASE + ino;
886 return (1);
887 }
888
889 /*
890 * Interrupt handlers
891 */
892 static int
893 fire_dmc_pec(void *arg)
894 {
895 struct fire_softc *sc;
896 device_t dev;
897 uint64_t cestat, dmcstat, ilustat, imustat, mcstat, mmustat, mmutfar;
898 uint64_t mmutfsr, oestat, pecstat, uestat, val;
899 u_int fatal, oenfatal;
900
901 fatal = 0;
902 sc = arg;
903 dev = sc->sc_dev;
904 mtx_lock_spin(&sc->sc_pcib_mtx);
905 mcstat = FIRE_PCI_READ_8(sc, FO_PCI_MULTI_CORE_ERR_STAT);
906 if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_DMC) != 0) {
907 dmcstat = FIRE_PCI_READ_8(sc, FO_PCI_DMC_CORE_BLOCK_ERR_STAT);
908 if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_IMU) != 0) {
909 imustat = FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_STAT);
910 device_printf(dev, "IMU error %#llx\n",
911 (unsigned long long)imustat);
912 if ((imustat &
913 FO_PCI_IMU_ERR_INT_EQ_NOT_EN_P) != 0) {
914 fatal = 1;
915 val = FIRE_PCI_READ_8(sc,
916 FO_PCI_IMU_SCS_ERR_LOG);
917 device_printf(dev, "SCS error log %#llx\n",
918 (unsigned long long)val);
919 }
920 if ((imustat & FO_PCI_IMU_ERR_INT_EQ_OVER_P) != 0) {
921 fatal = 1;
922 val = FIRE_PCI_READ_8(sc,
923 FO_PCI_IMU_EQS_ERR_LOG);
924 device_printf(dev, "EQS error log %#llx\n",
925 (unsigned long long)val);
926 }
927 if ((imustat & (FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_P |
928 FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_P |
929 FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_P |
930 FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_P |
931 FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
932 FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
933 FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P |
934 FO_PCI_IMU_ERR_INT_MSI_NOT_EN_P)) != 0) {
935 fatal = 1;
936 val = FIRE_PCI_READ_8(sc,
937 FO_PCI_IMU_RDS_ERR_LOG);
938 device_printf(dev, "RDS error log %#llx\n",
939 (unsigned long long)val);
940 }
941 }
942 if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_MMU) != 0) {
943 fatal = 1;
944 mmustat = FIRE_PCI_READ_8(sc, FO_PCI_MMU_INT_STAT);
945 mmutfar = FIRE_PCI_READ_8(sc,
946 FO_PCI_MMU_TRANS_FAULT_ADDR);
947 mmutfsr = FIRE_PCI_READ_8(sc,
948 FO_PCI_MMU_TRANS_FAULT_STAT);
949 if ((mmustat & (FO_PCI_MMU_ERR_INT_TBW_DPE_P |
950 FO_PCI_MMU_ERR_INT_TBW_ERR_P |
951 FO_PCI_MMU_ERR_INT_TBW_UDE_P |
952 FO_PCI_MMU_ERR_INT_TBW_DME_P |
953 FO_PCI_MMU_ERR_INT_TTC_CAE_P |
954 FIRE_PCI_MMU_ERR_INT_TTC_DPE_P |
955 OBERON_PCI_MMU_ERR_INT_TTC_DUE_P |
956 FO_PCI_MMU_ERR_INT_TRN_ERR_P)) != 0)
957 fatal = 1;
958 else {
959 sc->sc_stats_mmu_err++;
960 FIRE_PCI_WRITE_8(sc, FO_PCI_MMU_ERR_STAT_CLR,
961 mmustat);
962 }
963 device_printf(dev,
964 "MMU error %#llx: TFAR %#llx TFSR %#llx\n",
965 (unsigned long long)mmustat,
966 (unsigned long long)mmutfar,
967 (unsigned long long)mmutfsr);
968 }
969 }
970 if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_PEC) != 0) {
971 pecstat = FIRE_PCI_READ_8(sc, FO_PCI_PEC_CORE_BLOCK_INT_STAT);
972 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_UERR) != 0) {
973 fatal = 1;
974 uestat = FIRE_PCI_READ_8(sc,
975 FO_PCI_TLU_UERR_INT_STAT);
976 device_printf(dev,
977 "DLU/TLU uncorrectable error %#llx\n",
978 (unsigned long long)uestat);
979 if ((uestat & (FO_PCI_TLU_UERR_INT_UR_P |
980 OBERON_PCI_TLU_UERR_INT_POIS_P |
981 FO_PCI_TLU_UERR_INT_MFP_P |
982 FO_PCI_TLU_UERR_INT_ROF_P |
983 FO_PCI_TLU_UERR_INT_UC_P |
984 FIRE_PCI_TLU_UERR_INT_PP_P |
985 OBERON_PCI_TLU_UERR_INT_POIS_P)) != 0) {
986 val = FIRE_PCI_READ_8(sc,
987 FO_PCI_TLU_RX_UERR_HDR1_LOG);
988 device_printf(dev,
989 "receive header log %#llx\n",
990 (unsigned long long)val);
991 val = FIRE_PCI_READ_8(sc,
992 FO_PCI_TLU_RX_UERR_HDR2_LOG);
993 device_printf(dev,
994 "receive header log 2 %#llx\n",
995 (unsigned long long)val);
996 }
997 if ((uestat & FO_PCI_TLU_UERR_INT_CTO_P) != 0) {
998 val = FIRE_PCI_READ_8(sc,
999 FO_PCI_TLU_TX_UERR_HDR1_LOG);
1000 device_printf(dev,
1001 "transmit header log %#llx\n",
1002 (unsigned long long)val);
1003 val = FIRE_PCI_READ_8(sc,
1004 FO_PCI_TLU_TX_UERR_HDR2_LOG);
1005 device_printf(dev,
1006 "transmit header log 2 %#llx\n",
1007 (unsigned long long)val);
1008 }
1009 if ((uestat & FO_PCI_TLU_UERR_INT_DLP_P) != 0) {
1010 val = FIRE_PCI_READ_8(sc,
1011 FO_PCI_LPU_LNK_LYR_INT_STAT);
1012 device_printf(dev,
1013 "link layer interrupt and status %#llx\n",
1014 (unsigned long long)val);
1015 }
1016 if ((uestat & FO_PCI_TLU_UERR_INT_TE_P) != 0) {
1017 val = FIRE_PCI_READ_8(sc,
1018 FO_PCI_LPU_PHY_LYR_INT_STAT);
1019 device_printf(dev,
1020 "phy layer interrupt and status %#llx\n",
1021 (unsigned long long)val);
1022 }
1023 }
1024 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_CERR) != 0) {
1025 sc->sc_stats_tlu_ce++;
1026 cestat = FIRE_PCI_READ_8(sc,
1027 FO_PCI_TLU_CERR_INT_STAT);
1028 device_printf(dev,
1029 "DLU/TLU correctable error %#llx\n",
1030 (unsigned long long)cestat);
1031 val = FIRE_PCI_READ_8(sc,
1032 FO_PCI_LPU_LNK_LYR_INT_STAT);
1033 device_printf(dev,
1034 "link layer interrupt and status %#llx\n",
1035 (unsigned long long)val);
1036 if ((cestat & FO_PCI_TLU_CERR_INT_RE_P) != 0) {
1037 FIRE_PCI_WRITE_8(sc,
1038 FO_PCI_LPU_LNK_LYR_INT_STAT, val);
1039 val = FIRE_PCI_READ_8(sc,
1040 FO_PCI_LPU_PHY_LYR_INT_STAT);
1041 device_printf(dev,
1042 "phy layer interrupt and status %#llx\n",
1043 (unsigned long long)val);
1044 }
1045 FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_CERR_STAT_CLR,
1046 cestat);
1047 }
1048 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_OEVENT) != 0) {
1049 oenfatal = 0;
1050 oestat = FIRE_PCI_READ_8(sc,
1051 FO_PCI_TLU_OEVENT_INT_STAT);
1052 device_printf(dev, "DLU/TLU other event %#llx\n",
1053 (unsigned long long)oestat);
1054 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1055 FO_PCI_TLU_OEVENT_MRC_P |
1056 FO_PCI_TLU_OEVENT_WUC_P |
1057 FO_PCI_TLU_OEVENT_RUC_P |
1058 FO_PCI_TLU_OEVENT_CRS_P)) != 0) {
1059 val = FIRE_PCI_READ_8(sc,
1060 FO_PCI_TLU_RX_OEVENT_HDR1_LOG);
1061 device_printf(dev,
1062 "receive header log %#llx\n",
1063 (unsigned long long)val);
1064 val = FIRE_PCI_READ_8(sc,
1065 FO_PCI_TLU_RX_OEVENT_HDR2_LOG);
1066 device_printf(dev,
1067 "receive header log 2 %#llx\n",
1068 (unsigned long long)val);
1069 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1070 FO_PCI_TLU_OEVENT_MRC_P |
1071 FO_PCI_TLU_OEVENT_WUC_P |
1072 FO_PCI_TLU_OEVENT_RUC_P)) != 0)
1073 fatal = 1;
1074 else {
1075 sc->sc_stats_tlu_oe_rx_err++;
1076 oenfatal = 1;
1077 }
1078 }
1079 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1080 FO_PCI_TLU_OEVENT_CTO_P |
1081 FO_PCI_TLU_OEVENT_WUC_P |
1082 FO_PCI_TLU_OEVENT_RUC_P)) != 0) {
1083 val = FIRE_PCI_READ_8(sc,
1084 FO_PCI_TLU_TX_OEVENT_HDR1_LOG);
1085 device_printf(dev,
1086 "transmit header log %#llx\n",
1087 (unsigned long long)val);
1088 val = FIRE_PCI_READ_8(sc,
1089 FO_PCI_TLU_TX_OEVENT_HDR2_LOG);
1090 device_printf(dev,
1091 "transmit header log 2 %#llx\n",
1092 (unsigned long long)val);
1093 if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
1094 FO_PCI_TLU_OEVENT_CTO_P |
1095 FO_PCI_TLU_OEVENT_WUC_P |
1096 FO_PCI_TLU_OEVENT_RUC_P)) != 0)
1097 fatal = 1;
1098 else {
1099 sc->sc_stats_tlu_oe_tx_err++;
1100 oenfatal = 1;
1101 }
1102 }
1103 if ((oestat & (FO_PCI_TLU_OEVENT_ERO_P |
1104 FO_PCI_TLU_OEVENT_EMP_P |
1105 FO_PCI_TLU_OEVENT_EPE_P |
1106 FIRE_PCI_TLU_OEVENT_ERP_P |
1107 OBERON_PCI_TLU_OEVENT_ERBU_P |
1108 FIRE_PCI_TLU_OEVENT_EIP_P |
1109 OBERON_PCI_TLU_OEVENT_EIUE_P)) != 0) {
1110 fatal = 1;
1111 val = FIRE_PCI_READ_8(sc,
1112 FO_PCI_LPU_LNK_LYR_INT_STAT);
1113 device_printf(dev,
1114 "link layer interrupt and status %#llx\n",
1115 (unsigned long long)val);
1116 }
1117 if ((oestat & (FO_PCI_TLU_OEVENT_IIP_P |
1118 FO_PCI_TLU_OEVENT_EDP_P |
1119 FIRE_PCI_TLU_OEVENT_EHP_P |
1120 OBERON_PCI_TLU_OEVENT_TLUEITMO_S |
1121 FO_PCI_TLU_OEVENT_ERU_P)) != 0)
1122 fatal = 1;
1123 if ((oestat & (FO_PCI_TLU_OEVENT_NFP_P |
1124 FO_PCI_TLU_OEVENT_LWC_P |
1125 FO_PCI_TLU_OEVENT_LIN_P |
1126 FO_PCI_TLU_OEVENT_LRS_P |
1127 FO_PCI_TLU_OEVENT_LDN_P |
1128 FO_PCI_TLU_OEVENT_LUP_P)) != 0)
1129 oenfatal = 1;
1130 if (oenfatal != 0) {
1131 sc->sc_stats_tlu_oe_non_fatal++;
1132 FIRE_PCI_WRITE_8(sc,
1133 FO_PCI_TLU_OEVENT_STAT_CLR, oestat);
1134 if ((oestat & FO_PCI_TLU_OEVENT_LIN_P) != 0)
1135 FIRE_PCI_WRITE_8(sc,
1136 FO_PCI_LPU_LNK_LYR_INT_STAT,
1137 FIRE_PCI_READ_8(sc,
1138 FO_PCI_LPU_LNK_LYR_INT_STAT));
1139 }
1140 }
1141 if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_ILU) != 0) {
1142 ilustat = FIRE_PCI_READ_8(sc, FO_PCI_ILU_INT_STAT);
1143 device_printf(dev, "ILU error %#llx\n",
1144 (unsigned long long)ilustat);
1145 if ((ilustat & FIRE_PCI_ILU_ERR_INT_IHB_PE_P) != 0)
1147 fatal = 1;
1148 else {
1149 sc->sc_stats_ilu_err++;
1150 FIRE_PCI_WRITE_8(sc, FO_PCI_ILU_INT_STAT,
1151 ilustat);
1152 }
1153 }
1154 }
1155 mtx_unlock_spin(&sc->sc_pcib_mtx);
1156 if (fatal != 0)
1157 panic("%s: fatal DMC/PEC error",
1158 device_get_nameunit(sc->sc_dev));
1159 return (FILTER_HANDLED);
1160 }
1161
1162 static int
1163 fire_xcb(void *arg)
1164 {
1165 struct fire_softc *sc;
1166 device_t dev;
1167 uint64_t errstat, intstat, val;
1168 u_int fatal;
1169
1170 fatal = 0;
1171 sc = arg;
1172 dev = sc->sc_dev;
1173 mtx_lock_spin(&sc->sc_pcib_mtx);
1174 if (sc->sc_mode == FIRE_MODE_OBERON) {
1175 intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
1176 device_printf(dev, "UBC error: interrupt status %#llx\n",
1177 (unsigned long long)intstat);
1178 if ((intstat & ~(OBERON_UBC_ERR_INT_DMARDUEB_P |
1179 OBERON_UBC_ERR_INT_DMARDUEA_P)) != 0)
1180 fatal = 1;
1181 else
1182 sc->sc_stats_ubc_dmardue++;
1183 if (fatal != 0) {
1184 mtx_unlock_spin(&sc->sc_pcib_mtx);
1185 panic("%s: fatal UBC core block error",
1186 device_get_nameunit(sc->sc_dev));
1187 } else {
1188 FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
1189 mtx_unlock_spin(&sc->sc_pcib_mtx);
1190 }
1191 } else {
1192 errstat = FIRE_CTRL_READ_8(sc, FIRE_JBC_CORE_BLOCK_ERR_STAT);
1193 if ((errstat & (FIRE_JBC_CORE_BLOCK_ERR_STAT_MERGE |
1194 FIRE_JBC_CORE_BLOCK_ERR_STAT_JBCINT |
1195 FIRE_JBC_CORE_BLOCK_ERR_STAT_DMCINT)) != 0) {
1196 intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
1197 device_printf(dev, "JBC interrupt status %#llx\n",
1198 (unsigned long long)intstat);
1199 if ((intstat & FIRE_JBC_ERR_INT_EBUS_TO_P) != 0) {
1200 val = FIRE_CTRL_READ_8(sc,
1201 FIRE_JBC_CSR_ERR_LOG);
1202 device_printf(dev, "CSR error log %#llx\n",
1203 (unsigned long long)val);
1204 }
1205 if ((intstat & (FIRE_JBC_ERR_INT_UNSOL_RD_P |
1206 FIRE_JBC_ERR_INT_UNSOL_INT_P)) != 0) {
1207 if ((intstat &
1208 FIRE_JBC_ERR_INT_UNSOL_RD_P) != 0)
1209 sc->sc_stats_jbc_unsol_rd++;
1210 if ((intstat &
1211 FIRE_JBC_ERR_INT_UNSOL_INT_P) != 0)
1212 sc->sc_stats_jbc_unsol_int++;
1213 val = FIRE_CTRL_READ_8(sc,
1214 FIRE_DMCINT_IDC_ERR_LOG);
1215 device_printf(dev,
1216 "DMCINT IDC error log %#llx\n",
1217 (unsigned long long)val);
1218 }
1219 if ((intstat & (FIRE_JBC_ERR_INT_MB_PER_P |
1220 FIRE_JBC_ERR_INT_MB_PEW_P)) != 0) {
1221 fatal = 1;
1222 val = FIRE_CTRL_READ_8(sc,
1223 FIRE_MERGE_TRANS_ERR_LOG);
1224 device_printf(dev,
1225 "merge transaction error log %#llx\n",
1226 (unsigned long long)val);
1227 }
1228 if ((intstat & FIRE_JBC_ERR_INT_IJP_P) != 0) {
1229 fatal = 1;
1230 val = FIRE_CTRL_READ_8(sc,
1231 FIRE_JBCINT_OTRANS_ERR_LOG);
1232 device_printf(dev,
1233 "JBCINT out transaction error log "
1234 "%#llx\n", (unsigned long long)val);
1235 val = FIRE_CTRL_READ_8(sc,
1236 FIRE_JBCINT_OTRANS_ERR_LOG2);
1237 device_printf(dev,
1238 "JBCINT out transaction error log 2 "
1239 "%#llx\n", (unsigned long long)val);
1240 }
1241 if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
1242 FIRE_JBC_ERR_INT_CE_ASYN_P |
1243 FIRE_JBC_ERR_INT_JTE_P | FIRE_JBC_ERR_INT_JBE_P |
1244 FIRE_JBC_ERR_INT_JUE_P |
1245 FIRE_JBC_ERR_INT_ICISE_P |
1246 FIRE_JBC_ERR_INT_WR_DPE_P |
1247 FIRE_JBC_ERR_INT_RD_DPE_P |
1248 FIRE_JBC_ERR_INT_ILL_BMW_P |
1249 FIRE_JBC_ERR_INT_ILL_BMR_P |
1250 FIRE_JBC_ERR_INT_BJC_P)) != 0) {
1251 if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
1252 FIRE_JBC_ERR_INT_JTE_P |
1253 FIRE_JBC_ERR_INT_JBE_P |
1254 FIRE_JBC_ERR_INT_JUE_P |
1255 FIRE_JBC_ERR_INT_ICISE_P |
1256 FIRE_JBC_ERR_INT_WR_DPE_P |
1257 FIRE_JBC_ERR_INT_RD_DPE_P |
1258 FIRE_JBC_ERR_INT_ILL_BMW_P |
1259 FIRE_JBC_ERR_INT_ILL_BMR_P |
1260 FIRE_JBC_ERR_INT_BJC_P)) != 0)
1261 fatal = 1;
1262 else
1263 sc->sc_stats_jbc_ce_async++;
1264 val = FIRE_CTRL_READ_8(sc,
1265 FIRE_JBCINT_ITRANS_ERR_LOG);
1266 device_printf(dev,
1267 "JBCINT in transaction error log %#llx\n",
1268 (unsigned long long)val);
1269 val = FIRE_CTRL_READ_8(sc,
1270 FIRE_JBCINT_ITRANS_ERR_LOG2);
1271 device_printf(dev,
1272 "JBCINT in transaction error log 2 "
1273 "%#llx\n", (unsigned long long)val);
1274 }
1275 if ((intstat & (FIRE_JBC_ERR_INT_PIO_UNMAP_RD_P |
1276 FIRE_JBC_ERR_INT_ILL_ACC_RD_P |
1277 FIRE_JBC_ERR_INT_PIO_UNMAP_P |
1278 FIRE_JBC_ERR_INT_PIO_DPE_P |
1279 FIRE_JBC_ERR_INT_PIO_CPE_P |
1280 FIRE_JBC_ERR_INT_ILL_ACC_P)) != 0) {
1281 fatal = 1;
1282 val = FIRE_CTRL_READ_8(sc,
1283 FIRE_JBC_CSR_ERR_LOG);
1284 device_printf(dev,
1285 "DMCINT ODCD error log %#llx\n",
1286 (unsigned long long)val);
1287 }
1288 if ((intstat & (FIRE_JBC_ERR_INT_MB_PEA_P |
1289 FIRE_JBC_ERR_INT_CPE_P | FIRE_JBC_ERR_INT_APE_P |
1290 FIRE_JBC_ERR_INT_PIO_CPE_P |
1291 FIRE_JBC_ERR_INT_JTCEEW_P |
1292 FIRE_JBC_ERR_INT_JTCEEI_P |
1293 FIRE_JBC_ERR_INT_JTCEER_P)) != 0) {
1294 fatal = 1;
1295 val = FIRE_CTRL_READ_8(sc,
1296 FIRE_FATAL_ERR_LOG);
1297 device_printf(dev, "fatal error log %#llx\n",
1298 (unsigned long long)val);
1299 val = FIRE_CTRL_READ_8(sc,
1300 FIRE_FATAL_ERR_LOG2);
1301 device_printf(dev, "fatal error log 2 "
1302 "%#llx\n", (unsigned long long)val);
1303 }
1304 if (fatal != 0) {
1305 mtx_unlock_spin(&sc->sc_pcib_mtx);
1306 panic("%s: fatal JBC core block error",
1307 device_get_nameunit(sc->sc_dev));
1308 } else {
1309 FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
1310 mtx_unlock_spin(&sc->sc_pcib_mtx);
1311 }
1312 } else {
1313 mtx_unlock_spin(&sc->sc_pcib_mtx);
1314 panic("%s: unknown JBC core block error status %#llx",
1315 device_get_nameunit(sc->sc_dev),
1316 (unsigned long long)errstat);
1317 }
1318 }
1319 return (FILTER_HANDLED);
1320 }
1321
1322 static int
1323 fire_pcie(void *arg)
1324 {
1325 struct fire_msiqarg *fmqa;
1326 struct fire_softc *sc;
1327 struct fo_msiq_record *qrec;
1328 device_t dev;
1329 uint64_t word0;
1330 u_int head, msg, msiq;
1331
1332 fmqa = arg;
1333 sc = fmqa->fmqa_fica.fica_sc;
1334 dev = sc->sc_dev;
1335 msiq = fmqa->fmqa_msiq;
1336 mtx_lock_spin(&fmqa->fmqa_mtx);
1337 head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
1338 FO_PCI_EQ_HD_SHFT;
1339 qrec = &fmqa->fmqa_base[head];
1340 word0 = qrec->fomqr_word0;
1341 for (;;) {
1342 KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSG) != 0,
1343 ("%s: received non-PCIe message in event queue %d "
1344 "(word0 %#llx)", device_get_nameunit(dev), msiq,
1345 (unsigned long long)word0));
1346 msg = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
1347 FO_MQR_WORD0_DATA0_SHFT;
1348
1349 #define PCIE_MSG_CODE_ERR_COR 0x30
1350 #define PCIE_MSG_CODE_ERR_NONFATAL 0x31
1351 #define PCIE_MSG_CODE_ERR_FATAL 0x33
1352
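/*
 * These match the ERR_COR, ERR_NONFATAL and ERR_FATAL message
 * codes defined by the PCI Express base specification.
 */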
1353 if (msg == PCIE_MSG_CODE_ERR_COR)
1354 device_printf(dev, "correctable PCIe error\n");
1355 else if (msg == PCIE_MSG_CODE_ERR_NONFATAL ||
1356 msg == PCIE_MSG_CODE_ERR_FATAL)
1357 panic("%s: %sfatal PCIe error",
1358 device_get_nameunit(dev),
1359 msg == PCIE_MSG_CODE_ERR_NONFATAL ? "non-" : "");
1360 else
1361 panic("%s: received unknown PCIe message %#x",
1362 device_get_nameunit(dev), msg);
1363 qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
1364 head = (head + 1) % sc->sc_msiq_size;
1365 qrec = &fmqa->fmqa_base[head];
1366 word0 = qrec->fomqr_word0;
1367 if (__predict_true((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
1368 break;
1369 }
1370 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
1371 FO_PCI_EQ_HD_SHFT);
1372 if ((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
1373 FO_PCI_EQ_TL_OVERR) != 0) {
1374 device_printf(dev, "event queue %d overflow\n", msiq);
1375 msiq <<= 3;
1376 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1377 FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
1378 FO_PCI_EQ_CTRL_CLR_COVERR);
1379 }
1380 mtx_unlock_spin(&fmqa->fmqa_mtx);
1381 return (FILTER_HANDLED);
1382 }
1383
1384 static int
1385 fire_maxslots(device_t dev)
1386 {
1387
1388 return (1);
1389 }
1390
1391 static uint32_t
1392 fire_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
1393 int width)
1394 {
1395 struct fire_softc *sc;
1396 bus_space_handle_t bh;
1397 u_long offset = 0;
1398 uint32_t r, wrd;
1399 int i;
1400 uint16_t shrt;
1401 uint8_t byte;
1402
1403 sc = device_get_softc(dev);
1404 if (bus < sc->sc_pci_secbus || bus > sc->sc_pci_subbus ||
1405 slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX)
1406 return (-1);
1407
1408 offset = FO_CONF_OFF(bus, slot, func, reg);
1409 bh = sc->sc_pci_bh[OFW_PCI_CS_CONFIG];
1410 switch (width) {
1411 case 1:
1412 i = bus_space_peek_1(sc->sc_pci_cfgt, bh, offset, &byte);
1413 r = byte;
1414 break;
1415 case 2:
1416 i = bus_space_peek_2(sc->sc_pci_cfgt, bh, offset, &shrt);
1417 r = shrt;
1418 break;
1419 case 4:
1420 i = bus_space_peek_4(sc->sc_pci_cfgt, bh, offset, &wrd);
1421 r = wrd;
1422 break;
1423 default:
1424 panic("%s: bad width", __func__);
1425 /* NOTREACHED */
1426 }
1427
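/*
 * bus_space_peek_N() returns non-zero if the access faulted; in
 * that case mimic the all-ones value a read of nonexistent PCI
 * configuration space would return.
 */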
1428 if (i) {
1429 #ifdef FIRE_DEBUG
1430 printf("%s: read data error reading: %d.%d.%d: 0x%x\n",
1431 __func__, bus, slot, func, reg);
1432 #endif
1433 r = -1;
1434 }
1435 return (r);
1436 }
1437
1438 static void
1439 fire_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
1440 uint32_t val, int width)
1441 {
1442 struct fire_softc *sc;
1443 bus_space_handle_t bh;
1444 u_long offset = 0;
1445
1446 sc = device_get_softc(dev);
1447 if (bus < sc->sc_pci_secbus || bus > sc->sc_pci_subbus ||
1448 slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX)
1449 return;
1450
1451 offset = FO_CONF_OFF(bus, slot, func, reg);
1452 bh = sc->sc_pci_bh[OFW_PCI_CS_CONFIG];
1453 switch (width) {
1454 case 1:
1455 bus_space_write_1(sc->sc_pci_cfgt, bh, offset, val);
1456 break;
1457 case 2:
1458 bus_space_write_2(sc->sc_pci_cfgt, bh, offset, val);
1459 break;
1460 case 4:
1461 bus_space_write_4(sc->sc_pci_cfgt, bh, offset, val);
1462 break;
1463 default:
1464 panic("%s: bad width", __func__);
1465 /* NOTREACHED */
1466 }
1467 }
1468
1469 static int
1470 fire_route_interrupt(device_t bridge, device_t dev, int pin)
1471 {
1472 struct fire_softc *sc;
1473 struct ofw_pci_register reg;
1474 ofw_pci_intr_t pintr, mintr;
1475 uint8_t maskbuf[sizeof(reg) + sizeof(pintr)];
1476
1477 sc = device_get_softc(bridge);
1478 pintr = pin;
1479 if (ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo,
1480 &reg, sizeof(reg), &pintr, sizeof(pintr), &mintr, sizeof(mintr),
1481 NULL, maskbuf) != 0)
1482 return (mintr);
1483
1484 device_printf(bridge, "could not route pin %d for device %d.%d\n",
1485 pin, pci_get_slot(dev), pci_get_function(dev));
1486 return (PCI_INVALID_IRQ);
1487 }
1488
1489 static int
1490 fire_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
1491 {
1492 struct fire_softc *sc;
1493
1494 sc = device_get_softc(dev);
1495 switch (which) {
1496 case PCIB_IVAR_DOMAIN:
1497 *result = device_get_unit(dev);
1498 return (0);
1499 case PCIB_IVAR_BUS:
1500 *result = sc->sc_pci_secbus;
1501 return (0);
1502 }
1503 return (ENOENT);
1504 }
1505
1506 static void
1507 fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
1508 bus_dmasync_op_t op)
1509 {
1510 static u_char buf[VIS_BLOCKSIZE] __aligned(VIS_BLOCKSIZE);
1511 register_t reg, s;
1512
1513 if ((map->dm_flags & DMF_LOADED) == 0)
1514 return;
1515
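/*
 * For BUS_DMASYNC_POSTREAD, the FPU is enabled just long enough to
 * issue a 64-byte block-commit store (ASI_BLK_COMMIT_S) to a scratch
 * buffer followed by a Sync membar, which apparently serves as the
 * flushing primitive that makes preceding DMA writes visible to the
 * CPU.  For BUS_DMASYNC_PREWRITE, a Sync membar alone suffices.
 */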
1516 if ((op & BUS_DMASYNC_POSTREAD) != 0) {
1517 s = intr_disable();
1518 reg = rd(fprs);
1519 wr(fprs, reg | FPRS_FEF, 0);
1520 __asm __volatile("stda %%f0, [%0] %1"
1521 : : "r" (buf), "n" (ASI_BLK_COMMIT_S));
1522 membar(Sync);
1523 wr(fprs, reg, 0);
1524 intr_restore(s);
1525 } else if ((op & BUS_DMASYNC_PREWRITE) != 0)
1526 membar(Sync);
1527 }
1528
1529 static void
1530 fire_intr_enable(void *arg)
1531 {
1532 struct intr_vector *iv;
1533 struct fire_icarg *fica;
1534 struct fire_softc *sc;
1535 struct pcpu *pc;
1536 uint64_t mr;
1537 u_int ctrl, i;
1538
1539 iv = arg;
1540 fica = iv->iv_icarg;
1541 sc = fica->fica_sc;
1542 mr = FO_PCI_IMAP_V;
1543 if (sc->sc_mode == FIRE_MODE_OBERON)
1544 mr |= (iv->iv_mid << OBERON_PCI_IMAP_T_DESTID_SHFT) &
1545 OBERON_PCI_IMAP_T_DESTID_MASK;
1546 else
1547 mr |= (iv->iv_mid << FIRE_PCI_IMAP_T_JPID_SHFT) &
1548 FIRE_PCI_IMAP_T_JPID_MASK;
1549 /*
1550 * Given that all mondos for the same target are required to use the
1551 * same interrupt controller, we just use the CPU ID for indexing the
1552 * latter.
1553 */
1554 ctrl = 0;
1555 for (i = 0; i < mp_ncpus; ++i) {
1556 pc = pcpu_find(i);
1557 if (pc == NULL || iv->iv_mid != pc->pc_mid)
1558 continue;
1559 ctrl = pc->pc_cpuid % 4;
1560 break;
1561 }
1562 mr |= (1ULL << ctrl) << FO_PCI_IMAP_INT_CTRL_NUM_SHFT &
1563 FO_PCI_IMAP_INT_CTRL_NUM_MASK;
1564 FIRE_PCI_WRITE_8(sc, fica->fica_map, mr);
1565 }
1566
1567 static void
1568 fire_intr_disable(void *arg)
1569 {
1570 struct intr_vector *iv;
1571 struct fire_icarg *fica;
1572 struct fire_softc *sc;
1573
1574 iv = arg;
1575 fica = iv->iv_icarg;
1576 sc = fica->fica_sc;
1577 FIRE_PCI_WRITE_8(sc, fica->fica_map,
1578 FIRE_PCI_READ_8(sc, fica->fica_map) & ~FO_PCI_IMAP_V);
1579 }
1580
1581 static void
1582 fire_intr_assign(void *arg)
1583 {
1584 struct intr_vector *iv;
1585 struct fire_icarg *fica;
1586 struct fire_softc *sc;
1587 uint64_t mr;
1588
1589 iv = arg;
1590 fica = iv->iv_icarg;
1591 sc = fica->fica_sc;
1592 mr = FIRE_PCI_READ_8(sc, fica->fica_map);
1593 if ((mr & FO_PCI_IMAP_V) != 0) {
1594 FIRE_PCI_WRITE_8(sc, fica->fica_map, mr & ~FO_PCI_IMAP_V);
1595 FIRE_PCI_BARRIER(sc, fica->fica_map, 8,
1596 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1597 }
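/*
 * Wait for any pending mondo to be drained, i.e. for the clear
 * register to read INTCLR_IDLE, before re-enabling the mapping
 * with its new target.
 */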
1598 while (FIRE_PCI_READ_8(sc, fica->fica_clr) != INTCLR_IDLE)
1599 ;
1600 if ((mr & FO_PCI_IMAP_V) != 0)
1601 fire_intr_enable(arg);
1602 }
1603
1604 static void
1605 fire_intr_clear(void *arg)
1606 {
1607 struct intr_vector *iv;
1608 struct fire_icarg *fica;
1609
1610 iv = arg;
1611 fica = iv->iv_icarg;
1612 FIRE_PCI_WRITE_8(fica->fica_sc, fica->fica_clr, INTCLR_IDLE);
1613 }
1614
1615 /*
1616 * Given that the event queue implementation matches our current MD and MI
1617 * interrupt frameworks like square pegs fit into round holes, we are generous
1618 * and use one event queue per MSI for now, which limits us to 35 MSIs/MSI-Xs
1619 * per Host-PCIe-bridge (we use one event queue for the PCIe error messages).
1620 * This seems tolerable as long as most devices just use one MSI/MSI-X anyway.
1621 * Adding knowledge about MSIs/MSI-Xs to the MD interrupt code should allow us
1622 * to decouple the 1:1 mapping at the cost of no longer being able to bind
1623 * MSIs/MSI-Xs to specific CPUs as we currently have no reliable way to
1624 * quiesce a device while we move its MSIs/MSI-Xs to another event queue.
1625 */
1626
1627 static int
1628 fire_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused,
1629 int *irqs)
1630 {
1631 struct fire_softc *sc;
1632 u_int i, j, msiqrun;
1633
1634 if (powerof2(count) == 0 || count > 32)
1635 return (EINVAL);
1636
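/*
 * MSI counts are powers of two up to 32; find a run of count
 * consecutive free event queues as well as a naturally aligned
 * group of count free MSIs and bind them 1:1 below.
 */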
1637 sc = device_get_softc(dev);
1638 mtx_lock(&sc->sc_msi_mtx);
1639 msiqrun = 0;
1640 for (i = 0; i < sc->sc_msiq_count; i++) {
1641 for (j = i; j < i + count; j++) {
1642 if (isclr(sc->sc_msiq_bitmap, j) == 0)
1643 break;
1644 }
1645 if (j == i + count) {
1646 msiqrun = i;
1647 break;
1648 }
1649 }
1650 if (i == sc->sc_msiq_count) {
1651 mtx_unlock(&sc->sc_msi_mtx);
1652 return (ENXIO);
1653 }
1654 for (i = 0; i + count < sc->sc_msi_count; i += count) {
1655 for (j = i; j < i + count; j++)
1656 if (isclr(sc->sc_msi_bitmap, j) == 0)
1657 break;
1658 if (j == i + count) {
1659 for (j = 0; j < count; j++) {
1660 setbit(sc->sc_msiq_bitmap, msiqrun + j);
1661 setbit(sc->sc_msi_bitmap, i + j);
1662 sc->sc_msi_msiq_table[i + j] = msiqrun + j;
1663 irqs[j] = sc->sc_msi_first + i + j;
1664 }
1665 mtx_unlock(&sc->sc_msi_mtx);
1666 return (0);
1667 }
1668 }
1669 mtx_unlock(&sc->sc_msi_mtx);
1670 return (ENXIO);
1671 }
1672
1673 static int
1674 fire_release_msi(device_t dev, device_t child, int count, int *irqs)
1675 {
1676 struct fire_softc *sc;
1677 u_int i;
1678
1679 sc = device_get_softc(dev);
1680 mtx_lock(&sc->sc_msi_mtx);
1681 for (i = 0; i < count; i++) {
1682 clrbit(sc->sc_msiq_bitmap,
1683 sc->sc_msi_msiq_table[irqs[i] - sc->sc_msi_first]);
1684 clrbit(sc->sc_msi_bitmap, irqs[i] - sc->sc_msi_first);
1685 }
1686 mtx_unlock(&sc->sc_msi_mtx);
1687 return (0);
1688 }
1689
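/*
 * Allocate a single MSI-X for a child: take the lowest free event
 * queue but the highest free MSI, which helps to keep MSI-X vectors
 * out of the way of the power-of-two MSI runs fire_alloc_msi()
 * carves out from the bottom of the MSI range.
 */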
1690 static int
1691 fire_alloc_msix(device_t dev, device_t child, int *irq)
1692 {
1693 struct fire_softc *sc;
1694 int i, msiq;
1695
1696 sc = device_get_softc(dev);
1697 if ((sc->sc_flags & FIRE_MSIX) == 0)
1698 return (ENXIO);
1699 mtx_lock(&sc->sc_msi_mtx);
1700 msiq = 0;
1701 for (i = 0; i < sc->sc_msiq_count; i++) {
1702 if (isclr(sc->sc_msiq_bitmap, i) != 0) {
1703 msiq = i;
1704 break;
1705 }
1706 }
1707 if (i == sc->sc_msiq_count) {
1708 mtx_unlock(&sc->sc_msi_mtx);
1709 return (ENXIO);
1710 }
1711 for (i = sc->sc_msi_count - 1; i >= 0; i--) {
1712 if (isclr(sc->sc_msi_bitmap, i) != 0) {
1713 setbit(sc->sc_msiq_bitmap, msiq);
1714 setbit(sc->sc_msi_bitmap, i);
1715 sc->sc_msi_msiq_table[i] = msiq;
1716 *irq = sc->sc_msi_first + i;
1717 mtx_unlock(&sc->sc_msi_mtx);
1718 return (0);
1719 }
1720 }
1721 mtx_unlock(&sc->sc_msi_mtx);
1722 return (ENXIO);
1723 }
1724
1725 static int
1726 fire_release_msix(device_t dev, device_t child, int irq)
1727 {
1728 struct fire_softc *sc;
1729
1730 sc = device_get_softc(dev);
1731 if ((sc->sc_flags & FIRE_MSIX) == 0)
1732 return (ENXIO);
1733 mtx_lock(&sc->sc_msi_mtx);
1734 clrbit(sc->sc_msiq_bitmap,
1735 sc->sc_msi_msiq_table[irq - sc->sc_msi_first]);
1736 clrbit(sc->sc_msi_bitmap, irq - sc->sc_msi_first);
1737 mtx_unlock(&sc->sc_msi_mtx);
1738 return (0);
1739 }
1740
1741 static int
1742 fire_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
1743 uint32_t *data)
1744 {
1745 struct fire_softc *sc;
1746 struct pci_devinfo *dinfo;
1747
1748 sc = device_get_softc(dev);
1749 dinfo = device_get_ivars(child);
1750 if (dinfo->cfg.msi.msi_alloc > 0) {
1751 if ((irq & ~sc->sc_msi_data_mask) != 0) {
1752 device_printf(dev, "invalid MSI 0x%x\n", irq);
1753 return (EINVAL);
1754 }
1755 } else {
1756 if ((sc->sc_flags & FIRE_MSIX) == 0)
1757 return (ENXIO);
1758 if (fls(irq) > sc->sc_msix_data_width) {
1759 device_printf(dev, "invalid MSI-X 0x%x\n", irq);
1760 return (EINVAL);
1761 }
1762 }
1763 if (dinfo->cfg.msi.msi_alloc > 0 &&
1764 (dinfo->cfg.msi.msi_ctrl & PCIM_MSICTRL_64BIT) == 0)
1765 *addr = sc->sc_msi_addr32;
1766 else
1767 *addr = sc->sc_msi_addr64;
1768 *data = irq;
1769 return (0);
1770 }
1771
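/*
 * For illustration only (not part of the driver): the MI PCI code
 * obtains the address/data pair computed above via the PCIB_MAP_MSI()
 * method and programs it into the child's MSI capability, conceptually
 * along these lines (msireg being the offset of the capability, with
 * the register macros coming from <dev/pci/pcireg.h>):
 *
 *	uint64_t addr;
 *	uint32_t data;
 *
 *	if (PCIB_MAP_MSI(pcib, child, irq, &addr, &data) == 0) {
 *		pci_write_config(child, msireg + PCIR_MSI_ADDR, addr, 4);
 *		pci_write_config(child, msireg + PCIR_MSI_ADDR_HIGH,
 *		    addr >> 32, 4);
 *		pci_write_config(child, msireg + PCIR_MSI_DATA_64BIT,
 *		    data, 2);
 *	}
 *
 * For devices without 64-bit MSI support the data register lives at
 * PCIR_MSI_DATA instead and the 32-bit address from above is used.
 */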
1772 static void
1773 fire_msiq_handler(void *cookie)
1774 {
1775 struct intr_vector *iv;
1776 struct fire_msiqarg *fmqa;
1777
1778 iv = cookie;
1779 fmqa = iv->iv_icarg;
1780 /*
1781 * Note that since fire_intr_clear() will clear the event queue
1782 * interrupt after the handler associated with the MSI [sic] has
1783 * been executed, we have to protect the access to the event queue, as
1784 * otherwise nested event queue interrupts cause corruption of the
1785 * event queue on MP machines. Obviously especially when abandoning
1786 * the 1:1 mapping it would be better to not clear the event queue
1787 * interrupt after each handler invocation but only once when the
1788 * outstanding MSIs have been processed but unfortunately that
1789 * doesn't work well and leads to interrupt storms with controllers/
1790 * drivers which don't mask interrupts while the handler is executed.
1791 * Maybe delaying clearing the MSI until after the handler has been
1792 * executed could be used to work around this but that's not the
1793 * intended usage and might in turn cause lost MSIs.
1794 */
1795 mtx_lock_spin(&fmqa->fmqa_mtx);
1796 fire_msiq_common(iv, fmqa);
1797 mtx_unlock_spin(&fmqa->fmqa_mtx);
1798 }
1799
1800 static void
1801 fire_msiq_filter(void *cookie)
1802 {
1803 struct intr_vector *iv;
1804 struct fire_msiqarg *fmqa;
1805
1806 iv = cookie;
1807 fmqa = iv->iv_icarg;
1808 /*
1809 * For filters we don't use fire_intr_clear() since it would clear
1810 * the event queue interrupt while we're still processing the event
1811 * queue as filters and associated post-filter handler are executed
1812 * directly, which in turn would lead to lost MSIs. So we clear the
1813 * event queue interrupt only once after processing the event queue.
1814 * Given that this still guarantees that the filters are not executed
1815 * concurrently and no other CPU can clear the event queue interrupt
1816 * while the event queue is still processed, we don't even need to
1817 * interlock the access to the event queue in this case.
1818 */
1819 critical_enter();
1820 fire_msiq_common(iv, fmqa);
1821 FIRE_PCI_WRITE_8(fmqa->fmqa_fica.fica_sc, fmqa->fmqa_fica.fica_clr,
1822 INTCLR_IDLE);
1823 critical_exit();
1824 }
1825
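/*
 * Drain an event queue: starting at the current head pointer, hand
 * every pending MSI/MSI-X record to the associated interrupt event,
 * mark the record as free and advance the head modulo the queue size,
 * finally writing the new head back.  A set overflow bit in the tail
 * register is reported and cleared afterwards.
 */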
1826 static inline void
1827 fire_msiq_common(struct intr_vector *iv, struct fire_msiqarg *fmqa)
1828 {
1829 struct fire_softc *sc;
1830 struct fo_msiq_record *qrec;
1831 device_t dev;
1832 uint64_t word0;
1833 u_int head, msi, msiq;
1834
1835 sc = fmqa->fmqa_fica.fica_sc;
1836 dev = sc->sc_dev;
1837 msiq = fmqa->fmqa_msiq;
1838 head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
1839 FO_PCI_EQ_HD_SHFT;
1840 qrec = &fmqa->fmqa_base[head];
1841 word0 = qrec->fomqr_word0;
1842 for (;;) {
1843 if (__predict_false((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
1844 break;
1845 KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSI64) != 0 ||
1846 (word0 & FO_MQR_WORD0_FMT_TYPE_MSI32) != 0,
1847 ("%s: received non-MSI/MSI-X message in event queue %d "
1848 "(word0 %#llx)", device_get_nameunit(dev), msiq,
1849 (unsigned long long)word0));
1850 msi = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
1851 FO_MQR_WORD0_DATA0_SHFT;
1852 /*
1853 * Sanity check the MSI/MSI-X as long as we use a 1:1 mapping.
1854 */
1855 KASSERT(msi == fmqa->fmqa_msi,
1856 ("%s: received non-matching MSI/MSI-X in event queue %d "
1857 "(%d versus %d)", device_get_nameunit(dev), msiq, msi,
1858 fmqa->fmqa_msi));
1859 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + (msi << 3),
1860 FO_PCI_MSI_CLR_EQWR_N);
1861 if (__predict_false(intr_event_handle(iv->iv_event,
1862 NULL) != 0))
1863 printf("stray MSI/MSI-X in event queue %d\n", msiq);
1864 qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
1865 head = (head + 1) % sc->sc_msiq_size;
1866 qrec = &fmqa->fmqa_base[head];
1867 word0 = qrec->fomqr_word0;
1868 }
1869 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
1870 FO_PCI_EQ_HD_SHFT);
1871 if (__predict_false((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
1872 FO_PCI_EQ_TL_OVERR) != 0)) {
1873 device_printf(dev, "event queue %d overflow\n", msiq);
1874 msiq <<= 3;
1875 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1876 FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
1877 FO_PCI_EQ_CTRL_CLR_COVERR);
1878 }
1879 }
1880
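/*
 * For MSIs/MSI-Xs (rid != 0), translate the MSI into the interrupt
 * vector of the backing event queue, register the handler on that
 * vector, inject our event queue dispatcher as the vector function
 * and finally enable the event queue and point the MSI mapping
 * register at it.  INTx interrupts (rid 0) are merely sanity-checked
 * and passed through to the generic code.
 */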
1881 static int
1882 fire_setup_intr(device_t dev, device_t child, struct resource *ires,
1883 int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg,
1884 void **cookiep)
1885 {
1886 struct fire_softc *sc;
1887 struct fire_msiqarg *fmqa;
1888 u_long vec;
1889 int error;
1890 u_int msi, msiq;
1891
1892 sc = device_get_softc(dev);
1893 /*
1894 * XXX this assumes that a device only has one INTx line, while in
1895 * fact Cassini+ and Saturn can use all four lines the firmware has
1896 * assigned to them; pci(4) makes the same assumption, though.
1897 */
1898 if (rman_get_rid(ires) != 0) {
1899 msi = rman_get_start(ires);
1900 msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
1901 vec = INTMAP_VEC(sc->sc_ign, sc->sc_msiq_ino_first + msiq);
1902 msiq += sc->sc_msiq_first;
1903 if (intr_vectors[vec].iv_ic != &fire_ic) {
1904 device_printf(dev,
1905 "invalid interrupt controller for vector 0x%lx\n",
1906 vec);
1907 return (EINVAL);
1908 }
1909 /*
1910 * The MD interrupt code needs the vector rather than the MSI.
1911 */
1912 rman_set_start(ires, vec);
1913 rman_set_end(ires, vec);
1914 error = bus_generic_setup_intr(dev, child, ires, flags, filt,
1915 intr, arg, cookiep);
1916 rman_set_start(ires, msi);
1917 rman_set_end(ires, msi);
1918 if (error != 0)
1919 return (error);
1920 fmqa = intr_vectors[vec].iv_icarg;
1921 /*
1922 * XXX inject our event queue handler.
1923 */
1924 if (filt != NULL) {
1925 intr_vectors[vec].iv_func = fire_msiq_filter;
1926 intr_vectors[vec].iv_ic = &fire_msiqc_filter;
1927 /*
1928 * Ensure the event queue interrupt is cleared, as it
1929 * might have triggered before. Given we supply NULL
1930 * as ic_clear, inthand_add() won't do this for us.
1931 */
1932 FIRE_PCI_WRITE_8(sc, fmqa->fmqa_fica.fica_clr,
1933 INTCLR_IDLE);
1934 } else
1935 intr_vectors[vec].iv_func = fire_msiq_handler;
1936 /* Record the MSI/MSI-X as long as we use a 1:1 mapping. */
1937 fmqa->fmqa_msi = msi;
1938 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (msiq << 3),
1939 FO_PCI_EQ_CTRL_SET_EN);
1940 msi <<= 3;
1941 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1942 (FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
1943 ~FO_PCI_MSI_MAP_EQNUM_MASK) |
1944 ((msiq << FO_PCI_MSI_MAP_EQNUM_SHFT) &
1945 FO_PCI_MSI_MAP_EQNUM_MASK));
1946 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + msi,
1947 FO_PCI_MSI_CLR_EQWR_N);
1948 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1949 FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) |
1950 FO_PCI_MSI_MAP_V);
1951 return (error);
1952 }
1953
1954 /*
1955 * Make sure the vector is fully specified and we registered
1956 * our interrupt controller for it.
1957 */
1958 vec = rman_get_start(ires);
1959 if (INTIGN(vec) != sc->sc_ign) {
1960 device_printf(dev, "invalid interrupt vector 0x%lx\n", vec);
1961 return (EINVAL);
1962 }
1963 if (intr_vectors[vec].iv_ic != &fire_ic) {
1964 device_printf(dev,
1965 "invalid interrupt controller for vector 0x%lx\n", vec);
1966 return (EINVAL);
1967 }
1968 return (bus_generic_setup_intr(dev, child, ires, flags, filt, intr,
1969 arg, cookiep));
1970 }
1971
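/*
 * Undo the MSI path of fire_setup_intr(): invalidate the MSI mapping
 * register, disable and reset the backing event queue, hand the vector
 * back to the regular interrupt controller and tear down the handler.
 */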
1972 static int
1973 fire_teardown_intr(device_t dev, device_t child, struct resource *ires,
1974 void *cookie)
1975 {
1976 struct fire_softc *sc;
1977 u_long vec;
1978 int error;
1979 u_int msi, msiq;
1980
1981 sc = device_get_softc(dev);
1982 if (rman_get_rid(ires) != 0) {
1983 msi = rman_get_start(ires);
1984 msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
1985 vec = INTMAP_VEC(sc->sc_ign, msiq + sc->sc_msiq_ino_first);
1986 msiq += sc->sc_msiq_first;
1987 msi <<= 3;
1988 FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
1989 FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
1990 ~FO_PCI_MSI_MAP_V);
1991 msiq <<= 3;
1992 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
1993 FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
1994 FO_PCI_EQ_CTRL_CLR_DIS);
1995 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_TL_BASE + msiq,
1996 (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
1997 FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_HD_BASE + msiq,
1998 (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
1999 intr_vectors[vec].iv_ic = &fire_ic;
2000 /*
2001 * The MD interrupt code needs the vector rather than the MSI.
2002 */
2003 rman_set_start(ires, vec);
2004 rman_set_end(ires, vec);
2005 error = bus_generic_teardown_intr(dev, child, ires, cookie);
2006 msi >>= 3;
2007 rman_set_start(ires, msi);
2008 rman_set_end(ires, msi);
2009 return (error);
2010 }
2011 return (bus_generic_teardown_intr(dev, child, ires, cookie));
2012 }
2013
2014 static struct resource *
2015 fire_alloc_resource(device_t bus, device_t child, int type, int *rid,
2016 u_long start, u_long end, u_long count, u_int flags)
2017 {
2018 struct fire_softc *sc;
2019 struct resource *rv;
2020 struct rman *rm;
2021
2022 sc = device_get_softc(bus);
2023 switch (type) {
2024 case SYS_RES_IRQ:
2025 /*
2026 * XXX: Don't accept blank ranges for now, only single
2027 * interrupts. The other case should not happen with
2028 * the MI PCI code...
2029 * XXX: This may return a resource that is out of the
2030 * range that was specified. Is this correct...?
2031 */
2032 if (start != end)
2033 panic("%s: XXX: interrupt range", __func__);
2034 if (*rid == 0)
2035 start = end = INTMAP_VEC(sc->sc_ign, end);
2036 return (bus_generic_alloc_resource(bus, child, type, rid,
2037 start, end, count, flags));
2038 case SYS_RES_MEMORY:
2039 rm = &sc->sc_pci_mem_rman;
2040 break;
2041 case SYS_RES_IOPORT:
2042 rm = &sc->sc_pci_io_rman;
2043 break;
2044 default:
2045 return (NULL);
2046 }
2047
2048 rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
2049 child);
2050 if (rv == NULL)
2051 return (NULL);
2052 rman_set_rid(rv, *rid);
2053
2054 if ((flags & RF_ACTIVE) != 0 && bus_activate_resource(child, type,
2055 *rid, rv) != 0) {
2056 rman_release_resource(rv);
2057 return (NULL);
2058 }
2059 return (rv);
2060 }
2061
2062 static int
2063 fire_activate_resource(device_t bus, device_t child, int type, int rid,
2064 struct resource *r)
2065 {
2066 struct fire_softc *sc;
2067 struct bus_space_tag *tag;
2068
2069 sc = device_get_softc(bus);
2070 switch (type) {
2071 case SYS_RES_IRQ:
2072 return (bus_generic_activate_resource(bus, child, type, rid,
2073 r));
2074 case SYS_RES_MEMORY:
2075 tag = sparc64_alloc_bus_tag(r, rman_get_bustag(
2076 sc->sc_mem_res[FIRE_PCI]), PCI_MEMORY_BUS_SPACE, NULL);
2077 if (tag == NULL)
2078 return (ENOMEM);
2079 rman_set_bustag(r, tag);
2080 rman_set_bushandle(r, sc->sc_pci_bh[OFW_PCI_CS_MEM32] +
2081 rman_get_start(r));
2082 break;
2083 case SYS_RES_IOPORT:
2084 rman_set_bustag(r, sc->sc_pci_iot);
2085 rman_set_bushandle(r, sc->sc_pci_bh[OFW_PCI_CS_IO] +
2086 rman_get_start(r));
2087 break;
2088 }
2089 return (rman_activate_resource(r));
2090 }
2091
2092 static int
2093 fire_adjust_resource(device_t bus, device_t child, int type,
2094 struct resource *r, u_long start, u_long end)
2095 {
2096 struct fire_softc *sc;
2097 struct rman *rm;
2098
2099 sc = device_get_softc(bus);
2100 switch (type) {
2101 case SYS_RES_IRQ:
2102 return (bus_generic_adjust_resource(bus, child, type, r,
2103 start, end));
2104 case SYS_RES_MEMORY:
2105 rm = &sc->sc_pci_mem_rman;
2106 break;
2107 case SYS_RES_IOPORT:
2108 rm = &sc->sc_pci_io_rman;
2109 break;
2110 default:
2111 return (EINVAL);
2112 }
2113 if (rman_is_region_manager(r, rm) == 0)
2114 return (EINVAL);
2115 return (rman_adjust_resource(r, start, end));
2116 }
2117
2118 static bus_dma_tag_t
2119 fire_get_dma_tag(device_t bus, device_t child __unused)
2120 {
2121 struct fire_softc *sc;
2122
2123 sc = device_get_softc(bus);
2124 return (sc->sc_pci_dmat);
2125 }
2126
2127 static phandle_t
2128 fire_get_node(device_t bus, device_t child __unused)
2129 {
2130 struct fire_softc *sc;
2131
2132 sc = device_get_softc(bus);
2133 /* We only have one child, the PCI bus, which needs our own node. */
2134 return (sc->sc_node);
2135 }
2136
2137 static u_int
2138 fire_get_timecount(struct timecounter *tc)
2139 {
2140 struct fire_softc *sc;
2141
2142 sc = tc->tc_priv;
2143 return (FIRE_CTRL_READ_8(sc, FO_XBC_PRF_CNT0) & TC_COUNTER_MAX_MASK);
2144 }
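
/*
 * For illustration only: fire_get_timecount() is meant to back a
 * timecounter driven by the bridge's performance counter 0.  A minimal
 * sketch of the attach-time registration, assuming the counter runs at
 * a known frequency 'freq' (struct timecounter and tc_init() come from
 * <sys/timetc.h>), could look like:
 *
 *	struct timecounter *tc;
 *
 *	tc = malloc(sizeof(*tc), M_DEVBUF, M_NOWAIT | M_ZERO);
 *	tc->tc_get_timecount = fire_get_timecount;
 *	tc->tc_counter_mask = TC_COUNTER_MAX_MASK;
 *	tc->tc_frequency = freq;
 *	tc->tc_name = device_get_nameunit(sc->sc_dev);
 *	tc->tc_priv = sc;
 *	tc_init(tc);
 */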