1 /*-
2 * Copyright (c) 2016 Stanislav Galabov.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25 #include <sys/cdefs.h>
26 __FBSDID("$FreeBSD: releng/12.0/sys/mips/mediatek/mtk_pcie.c 336639 2018-07-23 15:36:55Z avg $");
27
28 #include <sys/param.h>
29 #include <sys/systm.h>
30
31 #include <sys/bus.h>
32 #include <sys/interrupt.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/rman.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/endian.h>
40
41 #include <vm/vm.h>
42 #include <vm/pmap.h>
43 #include <vm/vm_extern.h>
44
45 #include <machine/bus.h>
46 #include <machine/cpu.h>
47 #include <machine/intr.h>
48 #include <machine/pmap.h>
49
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pcireg.h>
52
53 #include <dev/pci/pcib_private.h>
54
55 #include <dev/fdt/fdt_common.h>
56 #include <dev/fdt/fdt_clock.h>
57 #include <dev/ofw/openfirm.h>
58 #include <dev/ofw/ofw_bus.h>
59 #include <dev/ofw/ofw_bus_subr.h>
60
61 #include <mips/mediatek/mtk_pcie.h>
62 #include <mips/mediatek/mtk_soc.h>
63 #include <mips/mediatek/mtk_sysctl.h>
64 #include <mips/mediatek/fdt_reset.h>
65
66 #include "ofw_bus_if.h"
67 #include "pcib_if.h"
68 #include "pic_if.h"
69
70 /*
71 * Note: We only support PCIe at the moment.
72 * Most SoCs in the Ralink/Mediatek family that we target actually don't
73 * support PCI anyway, with the notable exceptions being RT3662/RT3883, which
74 * support both PCI and PCIe. If there exists a board based on one of them
75 * which is of interest in the future it shouldn't be too hard to enable PCI
76 * support for it.
77 */
78
/* Chip specific function declarations */
static int  mtk_pcie_phy_init(device_t);
static int  mtk_pcie_phy_start(device_t);
static int  mtk_pcie_phy_stop(device_t);
static int  mtk_pcie_phy_mt7621_init(device_t);
static int  mtk_pcie_phy_mt7628_init(device_t);
static int  mtk_pcie_phy_mt7620_init(device_t);
static int  mtk_pcie_phy_rt3883_init(device_t);
static void mtk_pcie_phy_setup_slots(device_t);

/* Generic declarations */

/* Spin mutex serializing access to the indirect CFGADDR/CFGDATA registers */
struct mtx mtk_pci_mtx;
MTX_SYSINIT(mtk_pci_mtx, &mtk_pci_mtx, "MTK PCIe mutex", MTX_SPIN);

static int mtk_pci_intr(void *);

/*
 * Single-instance softc pointer; needed by the IRQ mask/unmask callbacks,
 * which only receive the IRQ number as their argument.
 */
static struct mtk_pci_softc *mt_sc = NULL;

/* One decoded window (memory or I/O) from the FDT 'ranges' property */
struct mtk_pci_range {
	u_long	base;	/* CPU-physical base address of the window */
	u_long	len;	/* window length in bytes */
};

/*
 * A 'ranges' tuple is (3 child addr + 1 parent addr + 2 size) cells;
 * we accept at most two tuples (one memory, one I/O window).
 */
#define FDT_RANGES_CELLS	((1 + 2 + 3) * 2)
103
/*
 * Print a decoded range for debugging.  Compiles to a no-op unless DEBUG
 * is defined.
 */
static void
mtk_pci_range_dump(struct mtk_pci_range *range)
{
#ifdef DEBUG
	printf("\n");
	printf(" base = 0x%08lx\n", range->base);
	printf(" len = 0x%08lx\n", range->len);
#endif
}
113
/*
 * Decode the FDT 'ranges' property of 'node' into an I/O window and a
 * memory window.  Requires #address-cells=3 / #size-cells=2 on the node
 * and #address-cells=1 on the parent.  A window not present in the
 * property is left zeroed (length 0).
 *
 * Returns 0 on success, EINVAL/ERANGE/ENOMEM on missing or malformed data.
 */
static int
mtk_pci_ranges_decode(phandle_t node, struct mtk_pci_range *io_space,
    struct mtk_pci_range *mem_space)
{
	struct mtk_pci_range *pci_space;
	pcell_t ranges[FDT_RANGES_CELLS];
	pcell_t addr_cells, size_cells, par_addr_cells;
	pcell_t *rangesptr;
	pcell_t cell0, cell1, cell2;
	int tuple_size, tuples, i, rv, len;

	/*
	 * Retrieve 'ranges' property.
	 */
	if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0)
		return (EINVAL);
	if (addr_cells != 3 || size_cells != 2)
		return (ERANGE);

	par_addr_cells = fdt_parent_addr_cells(node);
	if (par_addr_cells != 1)
		return (ERANGE);

	/* Reject a property larger than our on-stack buffer */
	len = OF_getproplen(node, "ranges");
	if (len > sizeof(ranges))
		return (ENOMEM);

	if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0)
		return (EINVAL);

	tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells +
	    size_cells);
	tuples = len / tuple_size;

	/*
	 * Initialize the ranges so that we don't have to worry about
	 * having them all defined in the FDT. In particular, it is
	 * perfectly fine not to want I/O space on PCI busses.
	 */
	bzero(io_space, sizeof(*io_space));
	bzero(mem_space, sizeof(*mem_space));

	rangesptr = &ranges[0];
	for (i = 0; i < tuples; i++) {
		/* First three cells: the PCI (child) address of the range */
		cell0 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;
		cell1 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;
		cell2 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;

		/*
		 * Per the PCI OF binding, bits in the first address cell
		 * select the space type: 0x02000000 = memory,
		 * 0x01000000 = I/O.  Anything else is rejected.
		 */
		if (cell0 & 0x02000000) {
			pci_space = mem_space;
		} else if (cell0 & 0x01000000) {
			pci_space = io_space;
		} else {
			rv = ERANGE;
			goto out;
		}

		/* Parent (CPU) address of the window, then its size */
		pci_space->base = fdt_data_get((void *)rangesptr,
		    par_addr_cells);
		rangesptr += par_addr_cells;

		pci_space->len = fdt_data_get((void *)rangesptr, size_cells);
		rangesptr += size_cells;
	}

	rv = 0;
out:
	return (rv);
}
186
187 static int
188 mtk_pci_ranges(phandle_t node, struct mtk_pci_range *io_space,
189 struct mtk_pci_range *mem_space)
190 {
191 int err;
192
193 if ((err = mtk_pci_ranges_decode(node, io_space, mem_space)) != 0) {
194 return (err);
195 }
196
197 mtk_pci_range_dump(io_space);
198 mtk_pci_range_dump(mem_space);
199
200 return (0);
201 }
202
/* FDT compatible strings mapped to the SoC IDs this driver supports */
static struct ofw_compat_data compat_data[] = {
	{ "ralink,rt3883-pci",		MTK_SOC_RT3883 },
	{ "mediatek,mt7620-pci",	MTK_SOC_MT7620A },
	{ "mediatek,mt7628-pci",	MTK_SOC_MT7628 },
	{ "mediatek,mt7621-pci",	MTK_SOC_MT7621 },
	{ NULL,				MTK_SOC_UNKNOWN }	/* terminator */
};
210
211 static int
212 mtk_pci_probe(device_t dev)
213 {
214 struct mtk_pci_softc *sc = device_get_softc(dev);
215
216 if (!ofw_bus_status_okay(dev))
217 return (ENXIO);
218
219 sc->socid = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
220 if (sc->socid == MTK_SOC_UNKNOWN)
221 return (ENXIO);
222
223 device_set_desc(dev, "MTK PCIe Controller");
224
225 return (0);
226 }
227
/*
 * Attach: allocate the register window and IRQ resources, decode the FDT
 * 'ranges' property, initialize the memory/I-O/IRQ resource managers, run
 * SoC-specific PHY bring-up, register as an interrupt controller, install
 * the interrupt filter(s) and attach the child "pci" bus.
 *
 * Error paths unwind in reverse order through the cleanup* labels.
 */
static int
mtk_pci_attach(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	struct mtk_pci_range io_space, mem_space;
	phandle_t node;
	intptr_t xref;
	int i, rid;

	sc->sc_dev = dev;
	mt_sc = sc;	/* stash for the IRQ mask/unmask callbacks */
	sc->addr_mask = 0xffffffff;

	/* Request our memory */
	rid = 0;
	sc->pci_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->pci_res[0] == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	/* See how many interrupts we need: MT7621 has one per PCIe port */
	if (sc->socid == MTK_SOC_MT7621)
		sc->sc_num_irq = 3;
	else {
		sc->sc_num_irq = 1;
		sc->pci_res[2] = sc->pci_res[3] = NULL;
		sc->pci_intrhand[1] = sc->pci_intrhand[2] = NULL;
	}

	/* Request our interrupts; pci_res[1..3] hold the IRQ resources */
	for (i = 1; i <= sc->sc_num_irq ; i++) {
		rid = i - 1;
		sc->pci_res[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_ACTIVE);
		if (sc->pci_res[i] == NULL) {
			device_printf(dev, "could not allocate interrupt "
			    "resource %d\n", rid);
			goto cleanup_res;
		}
	}

	/* Parse our PCI 'ranges' property */
	node = ofw_bus_get_node(dev);
	xref = OF_xref_from_node(node);
	if (mtk_pci_ranges(node, &io_space, &mem_space)) {
		device_printf(dev, "could not retrieve 'ranges' data\n");
		goto cleanup_res;
	}

	/* Memory, I/O and IRQ resource limits */
	sc->sc_io_base = io_space.base;
	sc->sc_io_size = io_space.len;
	sc->sc_mem_base = mem_space.base;
	sc->sc_mem_size = mem_space.len;
	sc->sc_irq_start = MTK_PCIE0_IRQ;
	sc->sc_irq_end = MTK_PCIE2_IRQ;

	/* Init resource managers for memory, I/O and IRQ */
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "mtk pcie memory window";
	if (rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base,
	    sc->sc_mem_base + sc->sc_mem_size - 1) != 0) {
		device_printf(dev, "failed to setup memory rman\n");
		goto cleanup_res;
	}

	sc->sc_io_rman.rm_type = RMAN_ARRAY;
	sc->sc_io_rman.rm_descr = "mtk pcie io window";
	if (rman_init(&sc->sc_io_rman) != 0 ||
	    rman_manage_region(&sc->sc_io_rman, sc->sc_io_base,
	    sc->sc_io_base + sc->sc_io_size - 1) != 0) {
		device_printf(dev, "failed to setup io rman\n");
		goto cleanup_res;
	}

	sc->sc_irq_rman.rm_type = RMAN_ARRAY;
	sc->sc_irq_rman.rm_descr = "mtk pcie irqs";
	if (rman_init(&sc->sc_irq_rman) != 0 ||
	    rman_manage_region(&sc->sc_irq_rman, sc->sc_irq_start,
	    sc->sc_irq_end) != 0) {
		device_printf(dev, "failed to setup irq rman\n");
		goto cleanup_res;
	}

	/* Do SoC-specific PCIe initialization */
	if (mtk_pcie_phy_init(dev)) {
		device_printf(dev, "pcie phy init failed\n");
		goto cleanup_rman;
	}

	/* Register ourselves as an interrupt controller */
	if (intr_pic_register(dev, xref) == NULL) {
		device_printf(dev, "could not register PIC\n");
		goto cleanup_rman;
	}

	/* Set up our interrupt handler */
	for (i = 1; i <= sc->sc_num_irq; i++) {
		sc->pci_intrhand[i - 1] = NULL;
		if (bus_setup_intr(dev, sc->pci_res[i], INTR_TYPE_MISC,
		    mtk_pci_intr, NULL, sc, &sc->pci_intrhand[i - 1])) {
			device_printf(dev, "could not setup intr handler %d\n",
			    i);
			goto cleanup;
		}
	}

	/* Attach our PCI child so bus enumeration can start */
	if (device_add_child(dev, "pci", -1) == NULL) {
		device_printf(dev, "could not attach pci bus\n");
		goto cleanup;
	}

	/* And finally, attach ourselves to the bus */
	if (bus_generic_attach(dev)) {
		device_printf(dev, "could not attach to bus\n");
		goto cleanup;
	}

	return (0);

cleanup:
#ifdef notyet
	/* NOTE: no intr_pic_unregister() exists yet, so the PIC leaks here */
	intr_pic_unregister(dev, xref);
#endif
	for (i = 1; i <= sc->sc_num_irq; i++) {
		if (sc->pci_intrhand[i - 1] != NULL)
			bus_teardown_intr(dev, sc->pci_res[i],
			    sc->pci_intrhand[i - 1]);
	}
cleanup_rman:
	mtk_pcie_phy_stop(dev);
	rman_fini(&sc->sc_irq_rman);
	rman_fini(&sc->sc_io_rman);
	rman_fini(&sc->sc_mem_rman);
cleanup_res:
	mt_sc = NULL;
	if (sc->pci_res[0] != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->pci_res[0]);
	if (sc->pci_res[1] != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pci_res[1]);
	if (sc->pci_res[2] != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->pci_res[2]);
	if (sc->pci_res[3] != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 2, sc->pci_res[3]);
	return (ENXIO);
}
378
379 static int
380 mtk_pci_read_ivar(device_t dev, device_t child, int which,
381 uintptr_t *result)
382 {
383 struct mtk_pci_softc *sc = device_get_softc(dev);
384
385 switch (which) {
386 case PCIB_IVAR_DOMAIN:
387 *result = device_get_unit(dev);
388 return (0);
389 case PCIB_IVAR_BUS:
390 *result = sc->sc_busno;
391 return (0);
392 }
393
394 return (ENOENT);
395 }
396
397 static int
398 mtk_pci_write_ivar(device_t dev, device_t child, int which,
399 uintptr_t result)
400 {
401 struct mtk_pci_softc *sc = device_get_softc(dev);
402
403 switch (which) {
404 case PCIB_IVAR_BUS:
405 sc->sc_busno = result;
406 return (0);
407 }
408
409 return (ENOENT);
410 }
411
/*
 * bus_alloc_resource implementation.  PCI bus numbers are delegated to
 * the generic pci domain code; IRQ, I/O-port and memory requests are
 * served from our private rmans.  Memory/IO resources are activated here
 * when RF_ACTIVE is requested; IRQs are activated at setup_intr time.
 */
static struct resource *
mtk_pci_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct mtk_pci_softc *sc = device_get_softc(bus);
	struct resource *rv;
	struct rman *rm;

	switch (type) {
	case PCI_RES_BUS:
		return pci_domain_alloc_bus(0, child, rid, start, end, count,
		    flags);
	case SYS_RES_IRQ:
		rm = &sc->sc_irq_rman;
		break;
	case SYS_RES_IOPORT:
		rm = &sc->sc_io_rman;
		break;
	case SYS_RES_MEMORY:
		rm = &sc->sc_mem_rman;
		break;
	default:
		return (NULL);
	}

	rv = rman_reserve_resource(rm, start, end, count, flags, child);

	if (rv == NULL)
		return (NULL);

	rman_set_rid(rv, *rid);

	/* IRQs are not activatable here; they are wired up in setup_intr */
	if ((flags & RF_ACTIVE) && type != SYS_RES_IRQ) {
		if (bus_activate_resource(child, type, *rid, rv)) {
			rman_release_resource(rv);
			return (NULL);
		}
	}

	return (rv);
}
453
454 static int
455 mtk_pci_release_resource(device_t bus, device_t child, int type, int rid,
456 struct resource *res)
457 {
458
459 if (type == PCI_RES_BUS)
460 return (pci_domain_release_bus(0, child, rid, res));
461
462 return (bus_generic_release_resource(bus, child, type, rid, res));
463 }
464
465 static int
466 mtk_pci_adjust_resource(device_t bus, device_t child, int type,
467 struct resource *res, rman_res_t start, rman_res_t end)
468 {
469 struct mtk_pci_softc *sc = device_get_softc(bus);
470 struct rman *rm;
471
472 switch (type) {
473 case PCI_RES_BUS:
474 return pci_domain_adjust_bus(0, child, res, start, end);
475 case SYS_RES_IRQ:
476 rm = &sc->sc_irq_rman;
477 break;
478 case SYS_RES_IOPORT:
479 rm = &sc->sc_io_rman;
480 break;
481 case SYS_RES_MEMORY:
482 rm = &sc->sc_mem_rman;
483 break;
484 default:
485 rm = NULL;
486 break;
487 }
488
489 if (rm != NULL)
490 return (rman_adjust_resource(res, start, end));
491
492 return (bus_generic_adjust_resource(bus, child, type, res, start, end));
493 }
494
495 static inline int
496 mtk_idx_to_irq(int idx)
497 {
498
499 return ((idx == 0) ? MTK_PCIE0_IRQ :
500 (idx == 1) ? MTK_PCIE1_IRQ :
501 (idx == 2) ? MTK_PCIE2_IRQ : -1);
502 }
503
504 static inline int
505 mtk_irq_to_idx(int irq)
506 {
507
508 return ((irq == MTK_PCIE0_IRQ) ? 0 :
509 (irq == MTK_PCIE1_IRQ) ? 1 :
510 (irq == MTK_PCIE2_IRQ) ? 2 : -1);
511 }
512
/*
 * Disable delivery of one PCIe interrupt by clearing its bit in the
 * PCIENA enable register.  'source' carries the IRQ number cast to
 * void * (see intr_event_create() in mtk_pci_setup_intr()); the global
 * mt_sc is used because no softc argument is available here.
 */
static void
mtk_pci_mask_irq(void *source)
{
	MT_WRITE32(mt_sc, MTK_PCI_PCIENA,
	    MT_READ32(mt_sc, MTK_PCI_PCIENA) & ~(1<<((int)source)));
}
519
/*
 * Enable delivery of one PCIe interrupt by setting its bit in the PCIENA
 * enable register.  'source' is the IRQ number cast to void *, as in
 * mtk_pci_mask_irq().
 */
static void
mtk_pci_unmask_irq(void *source)
{

	MT_WRITE32(mt_sc, MTK_PCI_PCIENA,
	    MT_READ32(mt_sc, MTK_PCI_PCIENA) | (1<<((int)source)));
}
527
528 static int
529 mtk_pci_setup_intr(device_t bus, device_t child, struct resource *ires,
530 int flags, driver_filter_t *filt, driver_intr_t *handler,
531 void *arg, void **cookiep)
532 {
533 struct mtk_pci_softc *sc = device_get_softc(bus);
534 struct intr_event *event;
535 int irq, error, irqidx;
536
537 irq = rman_get_start(ires);
538
539 if (irq < sc->sc_irq_start || irq > sc->sc_irq_end)
540 return (EINVAL);
541
542 irqidx = irq - sc->sc_irq_start;
543
544 event = sc->sc_eventstab[irqidx];
545 if (event == NULL) {
546 error = intr_event_create(&event, (void *)irq, 0, irq,
547 mtk_pci_mask_irq, mtk_pci_unmask_irq, NULL, NULL,
548 "pci intr%d:", irq);
549
550 if (error == 0) {
551 sc->sc_eventstab[irqidx] = event;
552 }
553 else {
554 return (error);
555 }
556 }
557
558 intr_event_add_handler(event, device_get_nameunit(child), filt,
559 handler, arg, intr_priority(flags), flags, cookiep);
560
561 mtk_pci_unmask_irq((void*)irq);
562
563 return (0);
564 }
565
/*
 * bus_teardown_intr implementation: mask the interrupt at the controller,
 * remove the handler from its intr_event and, if removal succeeded, drop
 * our reference to the event.  Panics if no event exists for the IRQ.
 */
static int
mtk_pci_teardown_intr(device_t dev, device_t child, struct resource *ires,
    void *cookie)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	int irq, result, irqidx;

	irq = rman_get_start(ires);
	if (irq < sc->sc_irq_start || irq > sc->sc_irq_end)
		return (EINVAL);

	irqidx = irq - sc->sc_irq_start;
	if (sc->sc_eventstab[irqidx] == NULL)
		panic("Trying to teardown unoccupied IRQ");

	/* Mask first so no interrupt fires while the handler is removed */
	mtk_pci_mask_irq((void*)irq);

	result = intr_event_remove_handler(cookie);
	if (!result)
		sc->sc_eventstab[irqidx] = NULL;


	return (result);
}
590
/*
 * Build the CFGADDR register value that selects a config-space dword:
 * bit 31 = enable, bits 27:24 = extended register (reg[11:8]),
 * bits 23:16 = bus, 15:11 = slot, 10:8 = function, 7:2 = dword-aligned
 * register offset.
 */
static inline uint32_t
mtk_pci_make_addr(int bus, int slot, int func, int reg)
{
	uint32_t addr;

	/*
	 * The enable bit must be formed with an unsigned constant:
	 * (1 << 31) overflows a signed int, which is undefined behavior.
	 */
	addr = ((((reg & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) |
	    (func << 8) | (reg & 0xfc) | (1U << 31));

	return (addr);
}
601
/*
 * pcib_maxslots implementation: report the standard maximum slot number.
 */
static int
mtk_pci_maxslots(device_t dev)
{

	return (PCI_SLOTMAX);
}
608
609 static inline int
610 mtk_pci_slot_has_link(device_t dev, int slot)
611 {
612 struct mtk_pci_softc *sc = device_get_softc(dev);
613
614 return !!(sc->pcie_link_status & (1<<slot));
615 }
616
/*
 * pcib_read_config implementation.  The controller uses an indirect
 * register pair: the encoded bus/slot/func/reg address is written to
 * CFGADDR, then the dword (or sub-word at the matching byte lane) is read
 * from CFGDATA.  The spin mutex keeps the address/data sequence atomic.
 * Bus-0 slots without a trained link read as ~0U (master-abort value).
 */
static uint32_t
mtk_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, int bytes)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	uint32_t addr = 0, data = 0;

	/* Return ~0U if slot has no link */
	if (bus == 0 && mtk_pci_slot_has_link(dev, slot) == 0) {
		return (~0U);
	}

	mtx_lock_spin(&mtk_pci_mtx);
	addr = mtk_pci_make_addr(bus, slot, func, (reg & ~3)) & sc->addr_mask;
	MT_WRITE32(sc, MTK_PCI_CFGADDR, addr);
	/* bytes % 4: 0 = dword, 1 = byte, 2 = word access */
	switch (bytes % 4) {
	case 0:
		data = MT_READ32(sc, MTK_PCI_CFGDATA);
		break;
	case 1:
		data = MT_READ8(sc, MTK_PCI_CFGDATA + (reg & 0x3));
		break;
	case 2:
		data = MT_READ16(sc, MTK_PCI_CFGDATA + (reg & 0x3));
		break;
	default:
		panic("%s(): Wrong number of bytes (%d) requested!\n",
		    __FUNCTION__, bytes % 4);
	}
	mtx_unlock_spin(&mtk_pci_mtx);

	return (data);
}
650
/*
 * pcib_write_config implementation; mirror image of mtk_pci_read_config():
 * write the encoded address to CFGADDR, then the data (at the proper byte
 * lane for sub-word writes) to CFGDATA, under the spin mutex.  Writes to
 * bus-0 slots without a trained link are silently dropped.
 */
static void
mtk_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, uint32_t val, int bytes)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	uint32_t addr = 0, data = val;

	/* Do not write if slot has no link */
	if (bus == 0 && mtk_pci_slot_has_link(dev, slot) == 0)
		return;

	mtx_lock_spin(&mtk_pci_mtx);
	addr = mtk_pci_make_addr(bus, slot, func, (reg & ~3)) & sc->addr_mask;
	MT_WRITE32(sc, MTK_PCI_CFGADDR, addr);
	/* bytes % 4: 0 = dword, 1 = byte, 2 = word access */
	switch (bytes % 4) {
	case 0:
		MT_WRITE32(sc, MTK_PCI_CFGDATA, data);
		break;
	case 1:
		MT_WRITE8(sc, MTK_PCI_CFGDATA + (reg & 0x3), data);
		break;
	case 2:
		MT_WRITE16(sc, MTK_PCI_CFGDATA + (reg & 0x3), data);
		break;
	default:
		panic("%s(): Wrong number of bytes (%d) requested!\n",
		    __FUNCTION__, bytes % 4);
	}
	mtx_unlock_spin(&mtk_pci_mtx);
}
681
682 static int
683 mtk_pci_route_interrupt(device_t pcib, device_t device, int pin)
684 {
685 int bus, sl, dev;
686
687 bus = pci_get_bus(device);
688 sl = pci_get_slot(device);
689 dev = pci_get_device(device);
690
691 if (bus != 0)
692 panic("Unexpected bus number %d\n", bus);
693
694 /* PCIe only */
695 switch (sl) {
696 case 0: return MTK_PCIE0_IRQ;
697 case 1: return MTK_PCIE0_IRQ + 1;
698 case 2: return MTK_PCIE0_IRQ + 2;
699 default: return (-1);
700 }
701
702 return (-1);
703 }
704
/*
 * Method table: this driver provides the device, bus, pcib and ofw_bus
 * interfaces, acting as a PCI-PCI-bridge-like host controller ("pcib").
 */
static device_method_t mtk_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mtk_pci_probe),
	DEVMETHOD(device_attach,	mtk_pci_attach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	mtk_pci_read_ivar),
	DEVMETHOD(bus_write_ivar,	mtk_pci_write_ivar),
	DEVMETHOD(bus_alloc_resource,	mtk_pci_alloc_resource),
	DEVMETHOD(bus_release_resource,	mtk_pci_release_resource),
	DEVMETHOD(bus_adjust_resource,	mtk_pci_adjust_resource),
	DEVMETHOD(bus_activate_resource,   bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,	mtk_pci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	mtk_pci_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	mtk_pci_maxslots),
	DEVMETHOD(pcib_read_config,	mtk_pci_read_config),
	DEVMETHOD(pcib_write_config,	mtk_pci_write_config),
	DEVMETHOD(pcib_route_interrupt,	mtk_pci_route_interrupt),
	DEVMETHOD(pcib_request_feature,	pcib_request_feature_allow),

	/* OFW bus interface */
	DEVMETHOD(ofw_bus_get_compat,	ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model,	ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name,	ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node,	ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type,	ofw_bus_gen_get_type),

	DEVMETHOD_END
};

static driver_t mtk_pci_driver = {
	"pcib",		/* attaches as a PCI bridge */
	mtk_pci_methods,
	sizeof(struct mtk_pci_softc),
};

static devclass_t mtk_pci_devclass;

/* Hang off simplebus, as instantiated from the FDT */
DRIVER_MODULE(mtk_pci, simplebus, mtk_pci_driver, mtk_pci_devclass, 0, 0);
750
/*
 * Our interrupt handler (filter).  Reads the pending-interrupt register
 * and dispatches the intr_event for each asserted PCIe IRQ.  Interrupts
 * with no registered handler are reported as stray.
 *
 * NOTE(review): the pending register is tested with the raw IRQ number as
 * the bit index (1u << irq), and the stray message is suppressed for
 * irq == 0 — both look intentional for this hardware but are not
 * documented here; confirm against the SoC reference manual.
 */
static int
mtk_pci_intr(void *arg)
{
	struct mtk_pci_softc *sc = arg;
	struct intr_event *event;
	uint32_t reg, irq, irqidx;

	reg = MT_READ32(sc, MTK_PCI_PCIINT);

	for (irq = sc->sc_irq_start; irq <= sc->sc_irq_end; irq++) {
		if (reg & (1u<<irq)) {
			irqidx = irq - sc->sc_irq_start;
			event = sc->sc_eventstab[irqidx];
			if (!event || CK_SLIST_EMPTY(&event->ie_handlers)) {
				if (irq != 0)
					printf("Stray PCI IRQ %d\n", irq);
				continue;
			}

			intr_event_handle(event, NULL);
		}
	}

	return (FILTER_HANDLED);
}
777
778 /* PCIe SoC-specific initialization */
779 static int
780 mtk_pcie_phy_init(device_t dev)
781 {
782 struct mtk_pci_softc *sc;
783
784 /* Get our softc */
785 sc = device_get_softc(dev);
786
787 /* We don't know how many slots we have yet */
788 sc->num_slots = 0;
789
790 /* Handle SoC specific PCIe init */
791 switch (sc->socid) {
792 case MTK_SOC_MT7628: /* Fallthrough */
793 case MTK_SOC_MT7688:
794 if (mtk_pcie_phy_mt7628_init(dev))
795 return (ENXIO);
796 break;
797 case MTK_SOC_MT7621:
798 if (mtk_pcie_phy_mt7621_init(dev))
799 return (ENXIO);
800 break;
801 case MTK_SOC_MT7620A:
802 if (mtk_pcie_phy_mt7620_init(dev))
803 return (ENXIO);
804 break;
805 case MTK_SOC_RT3662: /* Fallthrough */
806 case MTK_SOC_RT3883:
807 if (mtk_pcie_phy_rt3883_init(dev))
808 return (ENXIO);
809 break;
810 default:
811 device_printf(dev, "unsupported device %x\n", sc->socid);
812 return (ENXIO);
813 }
814
815 /*
816 * If we were successful so far go and set up the PCIe slots, so we
817 * may allocate mem/io/irq resources and enumerate busses later.
818 */
819 mtk_pcie_phy_setup_slots(dev);
820
821 return (0);
822 }
823
/*
 * Bring the PCIe PHY up: manipulate the FDT-described resets (pre-rev-E
 * MT7621 parts need the resets *asserted* here — inverted polarity versus
 * everything else) and enable the clocks.  Returns 0 or ENXIO.
 */
static int
mtk_pcie_phy_start(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	if (sc->socid == MTK_SOC_MT7621 &&
	    (mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) !=
	    SYSCTL_MT7621_REV_E) {
		if (fdt_reset_assert_all(dev))
			return (ENXIO);
	} else {
		if (fdt_reset_deassert_all(dev))
			return (ENXIO);
	}

	if (fdt_clock_enable_all(dev))
		return (ENXIO);

	return (0);
}
844
/*
 * Shut the PCIe PHY down: exact mirror of mtk_pcie_phy_start() — the
 * reset polarity is inverted on pre-rev-E MT7621 — and disable the
 * clocks.  Returns 0 or ENXIO.
 */
static int
mtk_pcie_phy_stop(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	if (sc->socid == MTK_SOC_MT7621 &&
	    (mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) !=
	    SYSCTL_MT7621_REV_E) {
		if (fdt_reset_deassert_all(dev))
			return (ENXIO);
	} else {
		if (fdt_reset_assert_all(dev))
			return (ENXIO);
	}

	if (fdt_clock_disable_all(dev))
		return (ENXIO);

	return (0);
}
865
/*
 * Read-modify-write helper: replace the _n-bit wide field starting at bit
 * _s of register _reg with value _v.
 */
#define mtk_pcie_phy_set(_sc, _reg, _s, _n, _v) \
	MT_WRITE32((_sc), (_reg), ((MT_READ32((_sc), (_reg)) & \
	    (~(((1ull << (_n)) - 1) << (_s)))) | ((_v) << (_s))))

/*
 * MT7621 rev E: bypass the PHY pipe reset on both ports at 'off'.  The
 * register offsets and values follow the vendor PHY programming sequence
 * and are not publicly documented.
 */
static void
mtk_pcie_phy_mt7621_bypass_pipe_rst(struct mtk_pci_softc *sc, uint32_t off)
{

	mtk_pcie_phy_set(sc, off + 0x002c, 12, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x002c, 4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x012c, 12, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x012c, 4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x102c, 12, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x102c, 4, 1, 1);
}
881
/*
 * MT7621: program the PHY PLL / spread-spectrum-clocking block at 'off'
 * according to the crystal frequency selection read from SYSCFG.  The
 * register offsets and values follow the vendor PHY programming sequence
 * and are not publicly documented; do not "simplify" them.
 */
static void
mtk_pcie_phy_mt7621_setup_ssc(struct mtk_pci_softc *sc, uint32_t off)
{
	uint32_t xtal_sel;

	/* Crystal selection: SYSCFG bits 8:6 (presumably encodes the
	 * reference clock frequency — the three branches below match
	 * different crystals) */
	xtal_sel = mtk_sysctl_get(SYSCTL_SYSCFG) >> 6;
	xtal_sel &= 0x7;

	mtk_pcie_phy_set(sc, off + 0x400, 8, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x400, 9, 2, 0);
	mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x100, 4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 0);
	mtk_pcie_phy_set(sc, off + 0x100, 5, 1, 0);

	if (xtal_sel <= 5 && xtal_sel >= 3) {
		mtk_pcie_phy_set(sc, off + 0x490, 6, 2, 1);
		mtk_pcie_phy_set(sc, off + 0x4a8, 0, 12, 0x1a);
		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x1a);
	} else {
		mtk_pcie_phy_set(sc, off + 0x490, 6, 2, 0);
		if (xtal_sel >= 6) {
			mtk_pcie_phy_set(sc, off + 0x4bc, 4, 2, 0x01);
			mtk_pcie_phy_set(sc, off + 0x49c, 0, 31, 0x18000000);
			mtk_pcie_phy_set(sc, off + 0x4a4, 0, 16, 0x18d);
			mtk_pcie_phy_set(sc, off + 0x4a8, 0, 12, 0x4a);
			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x4a);
			mtk_pcie_phy_set(sc, off + 0x4a8, 0, 12, 0x11);
			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x11);
		} else {
			mtk_pcie_phy_set(sc, off + 0x4a8, 0, 12, 0x1a);
			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x1a);
		}
	}

	mtk_pcie_phy_set(sc, off + 0x4a0, 5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x490, 22, 2, 2);
	mtk_pcie_phy_set(sc, off + 0x490, 18, 4, 6);
	mtk_pcie_phy_set(sc, off + 0x490, 12, 4, 2);
	mtk_pcie_phy_set(sc, off + 0x490, 8, 4, 1);
	mtk_pcie_phy_set(sc, off + 0x4ac, 16, 3, 0);
	mtk_pcie_phy_set(sc, off + 0x490, 1, 3, 2);

	if (xtal_sel <= 5 && xtal_sel >= 3) {
		mtk_pcie_phy_set(sc, off + 0x414, 6, 2, 1);
		mtk_pcie_phy_set(sc, off + 0x414, 5, 1, 1);
	}

	mtk_pcie_phy_set(sc, off + 0x414, 28, 2, 1);
	mtk_pcie_phy_set(sc, off + 0x040, 17, 4, 7);
	mtk_pcie_phy_set(sc, off + 0x040, 16, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x140, 17, 4, 7);
	mtk_pcie_phy_set(sc, off + 0x140, 16, 1, 1);

	/* Final enable/disable toggles on both port register banks */
	mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x100, 5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 0);
	mtk_pcie_phy_set(sc, off + 0x100, 4, 1, 0);
}
941
/* XXX: ugly, we need to fix this at some point */
/*
 * Direct uncached (KSEG1) views of the MT7621 GPIO control/data registers,
 * used here because PCIe resets are wired to GPIO pins.
 */
#define MT7621_GPIO_CTRL0 *((volatile uint32_t *)0xbe000600)
#define MT7621_GPIO_DATA0 *((volatile uint32_t *)0xbe000620)

/*
 * Read-modify-write a GPIO register.
 * NOTE(review): _clr is ANDed in directly (a keep-mask), not inverted as
 * the name suggests — passing _clr=0 clears ALL other bits.  The callers
 * below depend on this exact behavior; confirm before "fixing".
 */
#define mtk_gpio_clr_set(_reg, _clr, _set)		\
	do {						\
		(_reg) = ((_reg) & (_clr)) | (_set);	\
	} while (0)
950
/*
 * MT7621 PCIe PHY init: cycle the PHY through stop/start with the PCIe
 * device resets (GPIO pins) asserted, apply the rev-E pipe-reset bypass
 * where needed, program SSC on all three ports, then release the device
 * resets.  Returns 0 or ENXIO.
 */
static int
mtk_pcie_phy_mt7621_init(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	/* First off, stop the PHY */
	if (mtk_pcie_phy_stop(dev))
		return (ENXIO);

	/* PCIe resets are GPIO pins */
	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7621_PERST_GPIO_MODE |
	    MT7621_UARTL3_GPIO_MODE, MT7621_PERST_GPIO | MT7621_UARTL3_GPIO);

	/* Set GPIO pins as outputs */
	mtk_gpio_clr_set(MT7621_GPIO_CTRL0, 0, MT7621_PCIE_RST);

	/* Assert resets to PCIe devices */
	mtk_gpio_clr_set(MT7621_GPIO_DATA0, MT7621_PCIE_RST, 0);

	/* Give everything a chance to sink in */
	DELAY(100000);

	/* Now start the PHY again */
	if (mtk_pcie_phy_start(dev))
		return (ENXIO);

	/* Wait for things to settle */
	DELAY(100000);

	/* Only apply below to REV-E hardware */
	if ((mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) ==
	    SYSCTL_MT7621_REV_E)
		mtk_pcie_phy_mt7621_bypass_pipe_rst(sc, 0x9000);

	/* Setup PCIe ports 0 and 1 */
	mtk_pcie_phy_mt7621_setup_ssc(sc, 0x9000);
	/* Setup PCIe port 2 */
	mtk_pcie_phy_mt7621_setup_ssc(sc, 0xa000);

	/* Deassert resets to PCIe devices */
	mtk_gpio_clr_set(MT7621_GPIO_DATA0, 0, MT7621_PCIE_RST);

	/* Set number of slots supported */
	sc->num_slots = 3;

	/* Give it a chance to sink in */
	DELAY(100000);

	return (0);
}
1001
/*
 * MT7628/MT7688: program the PHY PLL block at 'off' according to the
 * single-bit crystal selection in SYSCFG.  The register offsets and
 * values follow the vendor PHY programming sequence and are not publicly
 * documented.
 */
static void
mtk_pcie_phy_mt7628_setup(struct mtk_pci_softc *sc, uint32_t off)
{
	uint32_t xtal_sel;

	/* Crystal selection: SYSCFG bit 6 picks one of two PLL programs */
	xtal_sel = mtk_sysctl_get(SYSCTL_SYSCFG) >> 6;
	xtal_sel &= 0x1;

	mtk_pcie_phy_set(sc, off + 0x400, 8, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x400, 9, 2, 0);
	mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 0);
	mtk_pcie_phy_set(sc, off + 0x4ac, 16, 3, 3);

	if (xtal_sel == 1) {
		mtk_pcie_phy_set(sc, off + 0x4bc, 24, 8, 0x7d);
		mtk_pcie_phy_set(sc, off + 0x490, 12, 4, 0x08);
		mtk_pcie_phy_set(sc, off + 0x490, 6, 2, 0x01);
		mtk_pcie_phy_set(sc, off + 0x4c0, 0, 32, 0x1f400000);
		mtk_pcie_phy_set(sc, off + 0x4a4, 0, 16, 0x013d);
		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 16, 0x74);
		mtk_pcie_phy_set(sc, off + 0x4a8, 0, 16, 0x74);
	} else {
		mtk_pcie_phy_set(sc, off + 0x4bc, 24, 8, 0x64);
		mtk_pcie_phy_set(sc, off + 0x490, 12, 4, 0x0a);
		mtk_pcie_phy_set(sc, off + 0x490, 6, 2, 0x00);
		mtk_pcie_phy_set(sc, off + 0x4c0, 0, 32, 0x19000000);
		mtk_pcie_phy_set(sc, off + 0x4a4, 0, 16, 0x018d);
		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 16, 0x4a);
		mtk_pcie_phy_set(sc, off + 0x4a8, 0, 16, 0x4a);
	}

	mtk_pcie_phy_set(sc, off + 0x498, 0, 8, 5);
	mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 0);
}
1038
/*
 * MT7628/MT7688 PCIe PHY init: route the PCIe reset pin, start the PHY,
 * program the PLL and deassert the device reset.  These SoCs have a
 * single PCIe slot.  Returns 0 or ENXIO.
 */
static int
mtk_pcie_phy_mt7628_init(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	/* Set PCIe reset to normal mode */
	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7628_PERST_GPIO_MODE,
	    MT7628_PERST);

	/* Start the PHY */
	if (mtk_pcie_phy_start(dev))
		return (ENXIO);

	/* Give it a chance to sink in */
	DELAY(100000);

	/* Setup the PHY */
	mtk_pcie_phy_mt7628_setup(sc, 0x9000);

	/* Deassert PCIe device reset */
	MT_CLR_SET32(sc, MTK_PCI_PCICFG, MTK_PCI_RESET, 0);

	/* Set number of slots supported */
	sc->num_slots = 1;

	return (0);
}
1066
1067 static int
1068 mtk_pcie_phy_mt7620_wait_busy(struct mtk_pci_softc *sc)
1069 {
1070 uint32_t reg_value, retry;
1071
1072 reg_value = retry = 0;
1073
1074 while (retry++ < MT7620_MAX_RETRIES) {
1075 reg_value = MT_READ32(sc, MT7620_PCIE_PHY_CFG);
1076 if (reg_value & PHY_BUSY)
1077 DELAY(100000);
1078 else
1079 break;
1080 }
1081
1082 if (retry >= MT7620_MAX_RETRIES)
1083 return (ENXIO);
1084
1085 return (0);
1086 }
1087
/*
 * Write one byte-wide MT7620 PHY register through the indirect PHY_CFG
 * interface: wait for idle, issue the write command (mode | addr | data),
 * then wait for completion.  Returns 0 or ENXIO on timeout.
 */
static int
mtk_pcie_phy_mt7620_set(struct mtk_pci_softc *sc, uint32_t reg,
    uint32_t val)
{
	uint32_t reg_val;

	if (mtk_pcie_phy_mt7620_wait_busy(sc))
		return (ENXIO);

	reg_val = PHY_MODE_WRITE | ((reg & 0xff) << PHY_ADDR_OFFSET) |
	    (val & 0xff);
	MT_WRITE32(sc, MT7620_PCIE_PHY_CFG, reg_val);
	DELAY(1000);

	if (mtk_pcie_phy_mt7620_wait_busy(sc))
		return (ENXIO);

	return (0);
}
1107
1108 static int
1109 mtk_pcie_phy_mt7620_init(device_t dev)
1110 {
1111 struct mtk_pci_softc *sc = device_get_softc(dev);
1112
1113 /*
1114 * The below sets the PCIe PHY to bypass the PCIe DLL and enables
1115 * "elastic buffer control", whatever that may be...
1116 */
1117 if (mtk_pcie_phy_mt7620_set(sc, 0x00, 0x80) ||
1118 mtk_pcie_phy_mt7620_set(sc, 0x01, 0x04) ||
1119 mtk_pcie_phy_mt7620_set(sc, 0x68, 0x84))
1120 return (ENXIO);
1121
1122 /* Stop PCIe */
1123 if (mtk_pcie_phy_stop(dev))
1124 return (ENXIO);
1125
1126 /* Restore PPLL to a sane state before going on */
1127 mtk_sysctl_clr_set(MT7620_PPLL_DRV, LC_CKDRVPD, PDRV_SW_SET);
1128
1129 /* No PCIe on the MT7620N */
1130 if (!(mtk_sysctl_get(SYSCTL_REVID) & MT7620_PKG_BGA)) {
1131 device_printf(dev, "PCIe disabled for MT7620N\n");
1132 mtk_sysctl_clr_set(MT7620_PPLL_CFG0, 0, PPLL_SW_SET);
1133 mtk_sysctl_clr_set(MT7620_PPLL_CFG1, 0, PPLL_PD);
1134 return (ENXIO);
1135 }
1136
1137 /* PCIe device reset pin is in normal mode */
1138 mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7620_PERST_GPIO_MODE,
1139 MT7620_PERST);
1140
1141 /* Enable PCIe now */
1142 if (mtk_pcie_phy_start(dev))
1143 return (ENXIO);
1144
1145 /* Give it a chance to sink in */
1146 DELAY(100000);
1147
1148 /* If PLL is not locked - bail */
1149 if (!(mtk_sysctl_get(MT7620_PPLL_CFG1) & PPLL_LOCKED)) {
1150 device_printf(dev, "no PPLL not lock\n");
1151 mtk_pcie_phy_stop(dev);
1152 return (ENXIO);
1153 }
1154
1155 /* Configure PCIe PLL */
1156 mtk_sysctl_clr_set(MT7620_PPLL_DRV, LC_CKDRVOHZ | LC_CKDRVHZ,
1157 LC_CKDRVPD | PDRV_SW_SET);
1158
1159 /* and give it a chance to settle */
1160 DELAY(100000);
1161
1162 /* Deassert PCIe device reset */
1163 MT_CLR_SET32(sc, MTK_PCI_PCICFG, MTK_PCI_RESET, 0);
1164
1165 /* MT7620 supports one PCIe slot */
1166 sc->num_slots = 1;
1167
1168 return (0);
1169 }
1170
/*
 * Bring up the PCIe PHY on the RT3883 SoC.
 *
 * Puts the controller into PCI host + PCIe root-complex mode, starts
 * the PHY, shuts off the legacy PCI block (only PCIe is supported),
 * lifts the PCIe reset and programs the PCI arbiter.  Always reports
 * a single slot.  Returns 0 on success, ENXIO if the PHY fails to
 * start.  The register write order below is deliberate - do not
 * reorder.
 */
static int
mtk_pcie_phy_rt3883_init(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	/* Enable PCI host mode and PCIe RC mode */
	mtk_sysctl_clr_set(SYSCTL_SYSCFG1, 0, RT3883_PCI_HOST_MODE |
	    RT3883_PCIE_RC_MODE);

	/* Enable PCIe PHY */
	if (mtk_pcie_phy_start(dev))
		return (ENXIO);

	/* Disable PCI, we only support PCIe for now */
	mtk_sysctl_clr_set(SYSCTL_RSTCTRL, 0, RT3883_PCI_RST);
	mtk_sysctl_clr_set(SYSCTL_CLKCFG1, RT3883_PCI_CLK, 0);

	/* Give things a chance to sink in */
	DELAY(500000);

	/* Set PCIe port number to 0 and lift PCIe reset */
	MT_WRITE32(sc, MTK_PCI_PCICFG, 0);

	/*
	 * Configure PCI Arbiter.
	 * NOTE(review): 0x79 is a magic arbiter setting carried over from
	 * vendor code; the individual bit meanings are not documented here
	 * - confirm against the RT3883 datasheet before changing.
	 */
	MT_WRITE32(sc, MTK_PCI_ARBCTL, 0x79);

	/* We have a single PCIe slot */
	sc->num_slots = 1;

	return (0);
}
1202
/*
 * Detect which PCIe slots have an active link and configure each linked
 * slot's root-port registers.
 *
 * Linked slots are recorded as a bitmask in sc->pcie_link_status.  For
 * each linked slot: BAR0 is enabled (64MB by default, 2GB on newer
 * SoCs), BAR1 is disabled, the config-space class is forced to
 * PCI-PCI bridge, bus mastering is enabled, and the bridge forwarding
 * windows (I/O, memory, prefetchable) are closed by programming each
 * base above its limit.
 */
static void
mtk_pcie_phy_setup_slots(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	uint32_t bar0_val, val;
	int i;

	/* Disable all PCIe interrupts */
	MT_WRITE32(sc, MTK_PCI_PCIENA, 0);

	/* Default bar0_val is 64M, enabled (bit 0 = enable) */
	bar0_val = 0x03FF0001;

	/* But we override it to 2G, enabled for some SoCs */
	if (sc->socid == MTK_SOC_MT7620A || sc->socid == MTK_SOC_MT7628 ||
	    sc->socid == MTK_SOC_MT7688 || sc->socid == MTK_SOC_MT7621)
		bar0_val = 0x7FFF0001;

	/* We still don't know which slots have linked up */
	sc->pcie_link_status = 0;

	/* XXX: I am not sure if this delay is really necessary */
	DELAY(500000);

	/*
	 * See which slots have links and mark them.
	 * Set up all slots' BARs and make them look like PCIe bridges.
	 */
	for (i = 0; i < sc->num_slots; i++) {
		/* If slot has link - mark it; otherwise skip it entirely */
		if (MT_READ32(sc, MTK_PCIE_STATUS(i)) & 1)
			sc->pcie_link_status |= (1<<i);
		else
			continue;

		/* Generic slot configuration follows */

		/* We enable BAR0 */
		MT_WRITE32(sc, MTK_PCIE_BAR0SETUP(i), bar0_val);
		/* and disable BAR1 */
		MT_WRITE32(sc, MTK_PCIE_BAR1SETUP(i), 0);
		/* Internal memory base has no offset */
		MT_WRITE32(sc, MTK_PCIE_IMBASEBAR0(i), 0);
		/* We're a PCIe bridge: class 0x0604 (PCI-PCI bridge), rev 01 */
		MT_WRITE32(sc, MTK_PCIE_CLASS(i), 0x06040001);

		/* Set bit 2 (bus master enable) in the PCI command reg (0x4) */
		val = mtk_pci_read_config(dev, 0, i, 0, 0x4, 4);
		mtk_pci_write_config(dev, 0, i, 0, 0x4, val | 0x4, 4);
		/*
		 * Replace byte 1 of vendor-specific config register 0x70c
		 * with 0x50.
		 * NOTE(review): 0x70c looks like a link timing/latency
		 * tuning register - confirm against the SoC datasheet.
		 */
		val = mtk_pci_read_config(dev, 0, i, 0, 0x70c, 4);
		val &= ~(0xff << 8);
		val |= (0x50 << 8);
		mtk_pci_write_config(dev, 0, i, 0, 0x70c, val, 4);

		/*
		 * Close the bridge forwarding windows: each base is set
		 * above its limit so no I/O, memory or prefetchable range
		 * is forwarded by default.
		 */
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOBASEL_1, 0xff, 1);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOBASEH_1, 0xffff, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOLIMITL_1, 0, 1);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOLIMITH_1, 0, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_MEMBASE_1, 0xffff, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_MEMLIMIT_1, 0, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMBASEL_1, 0xffff, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMBASEH_1, 0xffffffff,
		    4);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMLIMITL_1, 0, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMLIMITH_1, 0, 4);
	}
}
Cache object: 6592518ea4abec70c8d1c6d6da093123
|