FreeBSD/Linux Kernel Cross Reference
sys/dev/mgb/if_mgb.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2019 The FreeBSD Foundation, Inc.
5 *
6 * This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
7 * under sponsorship from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 /*
34 * Microchip LAN7430/LAN7431 PCIe to Gigabit Ethernet Controller driver.
35 *
36 * Product information:
37 * LAN7430 https://www.microchip.com/wwwproducts/en/LAN7430
38 * - Integrated IEEE 802.3 compliant PHY
39 * LAN7431 https://www.microchip.com/wwwproducts/en/LAN7431
40 * - RGMII Interface
41 *
42 * This driver uses the iflib interface and the default 'ukphy' PHY driver.
43 *
44 * UNIMPLEMENTED FEATURES
45 * ----------------------
46 * A number of features supported by the LAN743X device are not yet implemented in
47 * this driver:
48 *
49 * - Multiple (up to 4) RX queues support
50 * - Only requires removing the single-queue asserts and allocating
51 * one `rx_ring_data` struct per queue, based on ncpus.
52 * - RX/TX Checksum Offloading support
53 * - VLAN support
54 * - Receive Packet Filtering (Multicast Perfect/Hash Address) support
55 * - Wake on LAN (WoL) support
56 * - TX LSO support
57 * - Receive Side Scaling (RSS) support
58 * - Debugging Capabilities:
59 * - Could include MAC statistics and
60 * error status registers in sysctl.
61 */
62
63 #include <sys/param.h>
64 #include <sys/bus.h>
65 #include <sys/endian.h>
66 #include <sys/kdb.h>
67 #include <sys/kernel.h>
68 #include <sys/module.h>
69 #include <sys/rman.h>
70 #include <sys/socket.h>
71 #include <sys/sockio.h>
72 #include <machine/bus.h>
73 #include <machine/resource.h>
74
75 #include <net/ethernet.h>
76 #include <net/if.h>
77 #include <net/if_var.h>
78 #include <net/if_types.h>
79 #include <net/if_media.h>
80 #include <net/iflib.h>
81
82 #include <dev/mgb/if_mgb.h>
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85 #include <dev/pci/pcireg.h>
86 #include <dev/pci/pcivar.h>
87
88 #include "ifdi_if.h"
89 #include "miibus_if.h"
90
91 static pci_vendor_info_t mgb_vendor_info_array[] = {
92 PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7430_DEVICE_ID,
93 "Microchip LAN7430 PCIe Gigabit Ethernet Controller"),
94 PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7431_DEVICE_ID,
95 "Microchip LAN7431 PCIe Gigabit Ethernet Controller"),
96 PVID_END
97 };
98
99 /* Device methods */
100 static device_register_t mgb_register;
101
102 /* IFLIB methods */
103 static ifdi_attach_pre_t mgb_attach_pre;
104 static ifdi_attach_post_t mgb_attach_post;
105 static ifdi_detach_t mgb_detach;
106
107 static ifdi_tx_queues_alloc_t mgb_tx_queues_alloc;
108 static ifdi_rx_queues_alloc_t mgb_rx_queues_alloc;
109 static ifdi_queues_free_t mgb_queues_free;
110
111 static ifdi_init_t mgb_init;
112 static ifdi_stop_t mgb_stop;
113
114 static ifdi_msix_intr_assign_t mgb_msix_intr_assign;
115 static ifdi_tx_queue_intr_enable_t mgb_tx_queue_intr_enable;
116 static ifdi_rx_queue_intr_enable_t mgb_rx_queue_intr_enable;
117 static ifdi_intr_enable_t mgb_intr_enable_all;
118 static ifdi_intr_disable_t mgb_intr_disable_all;
119
120 /* IFLIB_TXRX methods */
121 static int mgb_isc_txd_encap(void *,
122 if_pkt_info_t);
123 static void mgb_isc_txd_flush(void *,
124 uint16_t, qidx_t);
125 static int mgb_isc_txd_credits_update(void *,
126 uint16_t, bool);
127 static int mgb_isc_rxd_available(void *,
128 uint16_t, qidx_t, qidx_t);
129 static int mgb_isc_rxd_pkt_get(void *,
130 if_rxd_info_t);
131 static void mgb_isc_rxd_refill(void *,
132 if_rxd_update_t);
133 static void mgb_isc_rxd_flush(void *,
134 uint16_t, uint8_t, qidx_t);
135
136 /* Interrupts */
137 static driver_filter_t mgb_legacy_intr;
138 static driver_filter_t mgb_admin_intr;
139 static driver_filter_t mgb_rxq_intr;
140 static bool mgb_intr_test(struct mgb_softc *);
141
142 /* MII methods */
143 static miibus_readreg_t mgb_miibus_readreg;
144 static miibus_writereg_t mgb_miibus_writereg;
145 static miibus_linkchg_t mgb_miibus_linkchg;
146 static miibus_statchg_t mgb_miibus_statchg;
147
148 static int mgb_media_change(if_t);
149 static void mgb_media_status(if_t,
150 struct ifmediareq *);
151
152 /* Helper/Test functions */
153 static int mgb_test_bar(struct mgb_softc *);
154 static int mgb_alloc_regs(struct mgb_softc *);
155 static int mgb_release_regs(struct mgb_softc *);
156
157 static void mgb_get_ethaddr(struct mgb_softc *,
158 struct ether_addr *);
159
160 static int mgb_wait_for_bits(struct mgb_softc *,
161 int, int, int);
162
163 /* H/W init, reset and teardown helpers */
164 static int mgb_hw_init(struct mgb_softc *);
165 static int mgb_hw_teardown(struct mgb_softc *);
166 static int mgb_hw_reset(struct mgb_softc *);
167 static int mgb_mac_init(struct mgb_softc *);
168 static int mgb_dmac_reset(struct mgb_softc *);
169 static int mgb_phy_reset(struct mgb_softc *);
170
171 static int mgb_dma_init(struct mgb_softc *);
172 static int mgb_dma_tx_ring_init(struct mgb_softc *,
173 int);
174 static int mgb_dma_rx_ring_init(struct mgb_softc *,
175 int);
176
177 static int mgb_dmac_control(struct mgb_softc *,
178 int, int, enum mgb_dmac_cmd);
179 static int mgb_fct_control(struct mgb_softc *,
180 int, int, enum mgb_fct_cmd);
181
182 /*********************************************************************
183 * FreeBSD Device Interface Entry Points
184 *********************************************************************/
185
186 static device_method_t mgb_methods[] = {
187 /* Device interface */
188 DEVMETHOD(device_register, mgb_register),
189 DEVMETHOD(device_probe, iflib_device_probe),
190 DEVMETHOD(device_attach, iflib_device_attach),
191 DEVMETHOD(device_detach, iflib_device_detach),
192 DEVMETHOD(device_shutdown, iflib_device_shutdown),
193 DEVMETHOD(device_suspend, iflib_device_suspend),
194 DEVMETHOD(device_resume, iflib_device_resume),
195
196 /* MII Interface */
197 DEVMETHOD(miibus_readreg, mgb_miibus_readreg),
198 DEVMETHOD(miibus_writereg, mgb_miibus_writereg),
199 DEVMETHOD(miibus_linkchg, mgb_miibus_linkchg),
200 DEVMETHOD(miibus_statchg, mgb_miibus_statchg),
201
202 DEVMETHOD_END
203 };
204
205 static driver_t mgb_driver = {
206 "mgb", mgb_methods, sizeof(struct mgb_softc)
207 };
208
209 devclass_t mgb_devclass;
210 DRIVER_MODULE(mgb, pci, mgb_driver, mgb_devclass, NULL, NULL);
211 IFLIB_PNP_INFO(pci, mgb, mgb_vendor_info_array);
212 MODULE_VERSION(mgb, 1);
213
214 #if 0 /* MIIBUS_DEBUG */
215 /* If MIIBUS debug stuff is in attach then order matters. Use below instead. */
216 DRIVER_MODULE_ORDERED(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL,
217 SI_ORDER_ANY);
218 #endif /* MIIBUS_DEBUG */
219 DRIVER_MODULE(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL);
220
221 MODULE_DEPEND(mgb, pci, 1, 1, 1);
222 MODULE_DEPEND(mgb, ether, 1, 1, 1);
223 MODULE_DEPEND(mgb, miibus, 1, 1, 1);
224 MODULE_DEPEND(mgb, iflib, 1, 1, 1);
225
226 static device_method_t mgb_iflib_methods[] = {
227 DEVMETHOD(ifdi_attach_pre, mgb_attach_pre),
228 DEVMETHOD(ifdi_attach_post, mgb_attach_post),
229 DEVMETHOD(ifdi_detach, mgb_detach),
230
231 DEVMETHOD(ifdi_init, mgb_init),
232 DEVMETHOD(ifdi_stop, mgb_stop),
233
234 DEVMETHOD(ifdi_tx_queues_alloc, mgb_tx_queues_alloc),
235 DEVMETHOD(ifdi_rx_queues_alloc, mgb_rx_queues_alloc),
236 DEVMETHOD(ifdi_queues_free, mgb_queues_free),
237
238 DEVMETHOD(ifdi_msix_intr_assign, mgb_msix_intr_assign),
239 DEVMETHOD(ifdi_tx_queue_intr_enable, mgb_tx_queue_intr_enable),
240 DEVMETHOD(ifdi_rx_queue_intr_enable, mgb_rx_queue_intr_enable),
241 DEVMETHOD(ifdi_intr_enable, mgb_intr_enable_all),
242 DEVMETHOD(ifdi_intr_disable, mgb_intr_disable_all),
243
244 #if 0 /* Not yet implemented IFLIB methods */
245 /*
246 * Set multicast addresses, mtu and promiscuous mode
247 */
248 DEVMETHOD(ifdi_multi_set, mgb_multi_set),
249 DEVMETHOD(ifdi_mtu_set, mgb_mtu_set),
250 DEVMETHOD(ifdi_promisc_set, mgb_promisc_set),
251
252 /*
253 * Needed for VLAN support
254 */
255 DEVMETHOD(ifdi_vlan_register, mgb_vlan_register),
256 DEVMETHOD(ifdi_vlan_unregister, mgb_vlan_unregister),
257
258 /*
259 * Needed for WOL support
260 * at the very least.
261 */
262 DEVMETHOD(ifdi_shutdown, mgb_shutdown),
263 DEVMETHOD(ifdi_suspend, mgb_suspend),
264 DEVMETHOD(ifdi_resume, mgb_resume),
265 #endif /* UNUSED_IFLIB_METHODS */
266 DEVMETHOD_END
267 };
268
269 static driver_t mgb_iflib_driver = {
270 "mgb", mgb_iflib_methods, sizeof(struct mgb_softc)
271 };
272
273 struct if_txrx mgb_txrx = {
274 .ift_txd_encap = mgb_isc_txd_encap,
275 .ift_txd_flush = mgb_isc_txd_flush,
276 .ift_txd_credits_update = mgb_isc_txd_credits_update,
277 .ift_rxd_available = mgb_isc_rxd_available,
278 .ift_rxd_pkt_get = mgb_isc_rxd_pkt_get,
279 .ift_rxd_refill = mgb_isc_rxd_refill,
280 .ift_rxd_flush = mgb_isc_rxd_flush,
281
282 .ift_legacy_intr = mgb_legacy_intr
283 };
284
285 struct if_shared_ctx mgb_sctx_init = {
286 .isc_magic = IFLIB_MAGIC,
287
288 .isc_q_align = PAGE_SIZE,
289 .isc_admin_intrcnt = 1,
290 .isc_flags = IFLIB_DRIVER_MEDIA /* | IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ*/,
291
292 .isc_vendor_info = mgb_vendor_info_array,
293 .isc_driver_version = "1",
294 .isc_driver = &mgb_iflib_driver,
295 /* 2 queues per set for TX and RX (ring queue, head writeback queue) */
296 .isc_ntxqs = 2,
297
298 .isc_tx_maxsize = MGB_DMA_MAXSEGS * MCLBYTES,
299 /* .isc_tx_nsegments = MGB_DMA_MAXSEGS, */
300 .isc_tx_maxsegsize = MCLBYTES,
301
302 .isc_ntxd_min = {1, 1}, /* Will want to make this bigger */
303 .isc_ntxd_max = {MGB_DMA_RING_SIZE, 1},
304 .isc_ntxd_default = {MGB_DMA_RING_SIZE, 1},
305
306 .isc_nrxqs = 2,
307
308 .isc_rx_maxsize = MCLBYTES,
309 .isc_rx_nsegments = 1,
310 .isc_rx_maxsegsize = MCLBYTES,
311
312 .isc_nrxd_min = {1, 1}, /* Will want to make this bigger */
313 .isc_nrxd_max = {MGB_DMA_RING_SIZE, 1},
314 .isc_nrxd_default = {MGB_DMA_RING_SIZE, 1},
315
316 .isc_nfl = 1, /* one free list since there is only one queue */
317 #if 0 /* UNUSED_CTX */
318
319 .isc_tso_maxsize = MGB_TSO_MAXSIZE + sizeof(struct ether_vlan_header),
320 .isc_tso_maxsegsize = MGB_TX_MAXSEGSIZE,
321 #endif /* UNUSED_CTX */
322 };
323
324 /*********************************************************************/
325
326 static void *
327 mgb_register(device_t dev)
328 {
329
330 return (&mgb_sctx_init);
331 }
332
333 static int
334 mgb_attach_pre(if_ctx_t ctx)
335 {
336 struct mgb_softc *sc;
337 if_softc_ctx_t scctx;
338 int error, phyaddr, rid;
339 struct ether_addr hwaddr;
340 struct mii_data *miid;
341
342 sc = iflib_get_softc(ctx);
343 sc->ctx = ctx;
344 sc->dev = iflib_get_dev(ctx);
345 scctx = iflib_get_softc_ctx(ctx);
346
347 /* IFLIB required setup */
348 scctx->isc_txrx = &mgb_txrx;
349 scctx->isc_tx_nsegments = MGB_DMA_MAXSEGS;
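	/*
	 * Each TX/RX queue set is backed by two iflib-allocated regions
	 * (isc_ntxqs/isc_nrxqs == 2): index 0 holds the DMA descriptor
	 * ring and index 1 holds the single 32-bit head write-back word
	 * that the device updates.
	 */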
350 /* Ring desc queues */
351 scctx->isc_txqsizes[0] = sizeof(struct mgb_ring_desc) *
352 scctx->isc_ntxd[0];
353 scctx->isc_rxqsizes[0] = sizeof(struct mgb_ring_desc) *
354 scctx->isc_nrxd[0];
355
356 /* Head WB queues */
357 scctx->isc_txqsizes[1] = sizeof(uint32_t) * scctx->isc_ntxd[1];
358 scctx->isc_rxqsizes[1] = sizeof(uint32_t) * scctx->isc_nrxd[1];
359
360 /* XXX: Must have 1 txqset, but can have up to 4 rxqsets */
361 scctx->isc_nrxqsets = 1;
362 scctx->isc_ntxqsets = 1;
363
364 /* scctx->isc_tx_csum_flags = (CSUM_TCP | CSUM_UDP) |
365 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TSO */
366 scctx->isc_tx_csum_flags = 0;
367 scctx->isc_capabilities = scctx->isc_capenable = 0;
368 #if 0
369 /*
370 * CSUM, TSO and VLAN support are TBD
371 */
372 IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
373 IFCAP_TSO4 | IFCAP_TSO6 |
374 IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
375 IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
376 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO |
377 IFCAP_JUMBO_MTU;
378 scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
379 #endif
380
381 /* get the BAR */
382 error = mgb_alloc_regs(sc);
383 if (error != 0) {
384 device_printf(sc->dev,
385 "Unable to allocate bus resource: registers.\n");
386 goto fail;
387 }
388
389 error = mgb_test_bar(sc);
390 if (error != 0)
391 goto fail;
392
393 error = mgb_hw_init(sc);
394 if (error != 0) {
395 device_printf(sc->dev,
396 "MGB device init failed. (err: %d)\n", error);
397 goto fail;
398 }
399
400 switch (pci_get_device(sc->dev))
401 {
402 case MGB_LAN7430_DEVICE_ID:
403 phyaddr = 1;
404 break;
405 case MGB_LAN7431_DEVICE_ID:
406 default:
407 phyaddr = MII_PHY_ANY;
408 break;
409 }
410
411 /* XXX: Would be nice(r) if locked methods were here */
412 error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(ctx),
413 mgb_media_change, mgb_media_status,
414 BMSR_DEFCAPMASK, phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE);
415 if (error != 0) {
416 device_printf(sc->dev, "Failed to attach MII interface\n");
417 goto fail;
418 }
419
420 miid = device_get_softc(sc->miibus);
421 scctx->isc_media = &miid->mii_media;
422
423 scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
424 /** Setup PBA BAR **/
425 rid = pci_msix_pba_bar(sc->dev);
426 if (rid != scctx->isc_msix_bar) {
427 sc->pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
428 &rid, RF_ACTIVE);
429 if (sc->pba == NULL) {
430 error = ENXIO;
431 device_printf(sc->dev, "Failed to setup PBA BAR\n");
432 goto fail;
433 }
434 }
435
436 mgb_get_ethaddr(sc, &hwaddr);
437 if (ETHER_IS_BROADCAST(hwaddr.octet) ||
438 ETHER_IS_MULTICAST(hwaddr.octet) ||
439 ETHER_IS_ZERO(hwaddr.octet))
440 ether_gen_addr(iflib_get_ifp(ctx), &hwaddr);
441
442 /*
443 * XXX: if the MAC address was generated the linux driver
444 * writes it back to the device.
445 */
446 iflib_set_mac(ctx, hwaddr.octet);
447
448 /* Map all vectors to vector 0 (admin interrupts) by default. */
449 CSR_WRITE_REG(sc, MGB_INTR_VEC_RX_MAP, 0);
450 CSR_WRITE_REG(sc, MGB_INTR_VEC_TX_MAP, 0);
451 CSR_WRITE_REG(sc, MGB_INTR_VEC_OTHER_MAP, 0);
452
453 return (0);
454
455 fail:
456 mgb_detach(ctx);
457 return (error);
458 }
459
460 static int
461 mgb_attach_post(if_ctx_t ctx)
462 {
463 struct mgb_softc *sc;
464
465 sc = iflib_get_softc(ctx);
466
467 device_printf(sc->dev, "Interrupt test: %s\n",
468 (mgb_intr_test(sc) ? "PASS" : "FAIL"));
469
470 return (0);
471 }
472
473 static int
474 mgb_detach(if_ctx_t ctx)
475 {
476 struct mgb_softc *sc;
477 int error;
478
479 sc = iflib_get_softc(ctx);
480
481 /* XXX: Should report errors but still detach everything. */
482 error = mgb_hw_teardown(sc);
483
484 /* Release IRQs */
485 iflib_irq_free(ctx, &sc->rx_irq);
486 iflib_irq_free(ctx, &sc->admin_irq);
487
488 if (sc->miibus != NULL)
489 device_delete_child(sc->dev, sc->miibus);
490
491 if (sc->pba != NULL)
492 error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
493 rman_get_rid(sc->pba), sc->pba);
494 sc->pba = NULL;
495
496 error = mgb_release_regs(sc);
497
498 return (error);
499 }
500
501 static int
502 mgb_media_change(if_t ifp)
503 {
504 struct mii_data *miid;
505 struct mii_softc *miisc;
506 struct mgb_softc *sc;
507 if_ctx_t ctx;
508 int needs_reset;
509
510 ctx = if_getsoftc(ifp);
511 sc = iflib_get_softc(ctx);
512 miid = device_get_softc(sc->miibus);
513 LIST_FOREACH(miisc, &miid->mii_phys, mii_list)
514 PHY_RESET(miisc);
515
516 needs_reset = mii_mediachg(miid);
517 if (needs_reset != 0)
518 ifp->if_init(ctx);
519 return (needs_reset);
520 }
521
522 static void
523 mgb_media_status(if_t ifp, struct ifmediareq *ifmr)
524 {
525 struct mgb_softc *sc;
526 struct mii_data *miid;
527
528 sc = iflib_get_softc(if_getsoftc(ifp));
529 miid = device_get_softc(sc->miibus);
530 if ((if_getflags(ifp) & IFF_UP) == 0)
531 return;
532
533 mii_pollstat(miid);
534 ifmr->ifm_active = miid->mii_media_active;
535 ifmr->ifm_status = miid->mii_media_status;
536 }
537
538 static int
539 mgb_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs,
540 int ntxqsets)
541 {
542 struct mgb_softc *sc;
543 struct mgb_ring_data *rdata;
544 int q;
545
546 sc = iflib_get_softc(ctx);
547 KASSERT(ntxqsets == 1, ("ntxqsets = %d", ntxqsets));
548 rdata = &sc->tx_ring_data;
549 for (q = 0; q < ntxqsets; q++) {
550 KASSERT(ntxqs == 2, ("ntxqs = %d", ntxqs));
551 /* Ring */
552 rdata->ring = (struct mgb_ring_desc *) vaddrs[q * ntxqs + 0];
553 rdata->ring_bus_addr = paddrs[q * ntxqs + 0];
554
555 /* Head WB */
556 rdata->head_wb = (uint32_t *) vaddrs[q * ntxqs + 1];
557 rdata->head_wb_bus_addr = paddrs[q * ntxqs + 1];
558 }
559 return 0;
560 }
561
562 static int
563 mgb_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs,
564 int nrxqsets)
565 {
566 struct mgb_softc *sc;
567 struct mgb_ring_data *rdata;
568 int q;
569
570 sc = iflib_get_softc(ctx);
571 KASSERT(nrxqsets == 1, ("nrxqsets = %d", nrxqsets));
572 rdata = &sc->rx_ring_data;
573 for (q = 0; q < nrxqsets; q++) {
574 KASSERT(nrxqs == 2, ("nrxqs = %d", nrxqs));
575 /* Ring */
576 rdata->ring = (struct mgb_ring_desc *) vaddrs[q * nrxqs + 0];
577 rdata->ring_bus_addr = paddrs[q * nrxqs + 0];
578
579 /* Head WB */
580 rdata->head_wb = (uint32_t *) vaddrs[q * nrxqs + 1];
581 rdata->head_wb_bus_addr = paddrs[q * nrxqs + 1];
582 }
583 return 0;
584 }
585
586 static void
587 mgb_queues_free(if_ctx_t ctx)
588 {
589 struct mgb_softc *sc;
590
591 sc = iflib_get_softc(ctx);
592
593 memset(&sc->rx_ring_data, 0, sizeof(struct mgb_ring_data));
594 memset(&sc->tx_ring_data, 0, sizeof(struct mgb_ring_data));
595 }
596
597 static void
598 mgb_init(if_ctx_t ctx)
599 {
600 struct mgb_softc *sc;
601 struct mii_data *miid;
602 int error;
603
604 sc = iflib_get_softc(ctx);
605 miid = device_get_softc(sc->miibus);
606 device_printf(sc->dev, "running init ...\n");
607
608 mgb_dma_init(sc);
609
610 /* XXX: Turn off perfect filtering, turn on (broad|multi|uni)cast rx */
611 CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PERFECT_FILTER);
612 CSR_UPDATE_REG(sc, MGB_RFE_CTL,
613 MGB_RFE_ALLOW_BROADCAST |
614 MGB_RFE_ALLOW_MULTICAST |
615 MGB_RFE_ALLOW_UNICAST);
616
617 error = mii_mediachg(miid);
618 KASSERT(!error, ("mii_mediachg returned: %d", error));
619 }
620
621 #ifdef DEBUG
622 static void
623 mgb_dump_some_stats(struct mgb_softc *sc)
624 {
625 int i;
626 int first_stat = 0x1200;
627 int last_stat = 0x12FC;
628
629 for (i = first_stat; i <= last_stat; i += 4)
630 if (CSR_READ_REG(sc, i) != 0)
631 device_printf(sc->dev, "0x%04x: 0x%08x\n", i,
632 CSR_READ_REG(sc, i));
633 char *stat_names[] = {
634 "MAC_ERR_STS ",
635 "FCT_INT_STS ",
636 "DMAC_CFG ",
637 "DMAC_CMD ",
638 "DMAC_INT_STS ",
639 "DMAC_INT_EN ",
640 "DMAC_RX_ERR_STS0 ",
641 "DMAC_RX_ERR_STS1 ",
642 "DMAC_RX_ERR_STS2 ",
643 "DMAC_RX_ERR_STS3 ",
644 "INT_STS ",
645 "INT_EN ",
646 "INT_VEC_EN ",
647 "INT_VEC_MAP0 ",
648 "INT_VEC_MAP1 ",
649 "INT_VEC_MAP2 ",
650 "TX_HEAD0",
651 "TX_TAIL0",
652 "DMAC_TX_ERR_STS0 ",
653 NULL
654 };
655 int stats[] = {
656 0x114,
657 0xA0,
658 0xC00,
659 0xC0C,
660 0xC10,
661 0xC14,
662 0xC60,
663 0xCA0,
664 0xCE0,
665 0xD20,
666 0x780,
667 0x788,
668 0x794,
669 0x7A0,
670 0x7A4,
671 0x780,
672 0xD58,
673 0xD5C,
674 0xD60,
675 0x0
676 };
677 i = 0;
678 printf("==============================\n");
679 while (stats[i++])
680 device_printf(sc->dev, "%s at offset 0x%04x = 0x%08x\n",
681 stat_names[i - 1], stats[i - 1],
682 CSR_READ_REG(sc, stats[i - 1]));
683 printf("==== TX RING DESCS ====\n");
684 for (i = 0; i < MGB_DMA_RING_SIZE; i++)
685 device_printf(sc->dev, "ring[%d].data0=0x%08x\n"
686 "ring[%d].data1=0x%08x\n"
687 "ring[%d].data2=0x%08x\n"
688 "ring[%d].data3=0x%08x\n",
689 i, sc->tx_ring_data.ring[i].ctl,
690 i, sc->tx_ring_data.ring[i].addr.low,
691 i, sc->tx_ring_data.ring[i].addr.high,
692 i, sc->tx_ring_data.ring[i].sts);
693 device_printf(sc->dev, "==== DUMP_TX_DMA_RAM ====\n");
695 CSR_WRITE_REG(sc, 0x24, 0xF); // DP_SEL & TX_RAM_0
696 for (i = 0; i < 128; i++) {
697 CSR_WRITE_REG(sc, 0x2C, i); // DP_ADDR
698
699 CSR_WRITE_REG(sc, 0x28, 0); // DP_CMD
700
701 while ((CSR_READ_REG(sc, 0x24) & 0x80000000) == 0) // DP_SEL & READY
702 DELAY(1000);
703
704 device_printf(sc->dev, "DMAC_TX_RAM_0[%u]=%08x\n", i,
705 CSR_READ_REG(sc, 0x30)); // DP_DATA
706 }
707 }
708 #endif
709
710 static void
711 mgb_stop(if_ctx_t ctx)
712 {
713 struct mgb_softc *sc;
714 if_softc_ctx_t scctx;
715 int i;
716
717 sc = iflib_get_softc(ctx);
718 scctx = iflib_get_softc_ctx(ctx);
719
720 /* XXX: Could potentially timeout */
721 for (i = 0; i < scctx->isc_nrxqsets; i++) {
722 mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_STOP);
723 mgb_fct_control(sc, MGB_FCT_RX_CTL, 0, FCT_DISABLE);
724 }
725 for (i = 0; i < scctx->isc_ntxqsets; i++) {
726 mgb_dmac_control(sc, MGB_DMAC_TX_START, 0, DMAC_STOP);
727 mgb_fct_control(sc, MGB_FCT_TX_CTL, 0, FCT_DISABLE);
728 }
729 }
730
731 static int
732 mgb_legacy_intr(void *xsc)
733 {
734 struct mgb_softc *sc;
735
736 sc = xsc;
737 iflib_admin_intr_deferred(sc->ctx);
738 return (FILTER_HANDLED);
739 }
740
741 static int
742 mgb_rxq_intr(void *xsc)
743 {
744 struct mgb_softc *sc;
745 if_softc_ctx_t scctx;
746 uint32_t intr_sts, intr_en;
747 int qidx;
748
749 sc = xsc;
750 scctx = iflib_get_softc_ctx(sc->ctx);
751
752 intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
753 intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
754 intr_sts &= intr_en;
755
756 for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
757 if ((intr_sts & MGB_INTR_STS_RX(qidx))){
758 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
759 MGB_INTR_STS_RX(qidx));
760 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_RX(qidx));
761 }
762 }
763 return (FILTER_SCHEDULE_THREAD);
764 }
765
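/*
 * Admin (vector 0) interrupt filter.  Services the software-generated test
 * interrupt as well as any RX/TX causes still mapped to vector 0; all causes
 * are mapped there at attach time until mgb_msix_intr_assign() moves the RX
 * queues onto their own vectors.
 */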
766 static int
767 mgb_admin_intr(void *xsc)
768 {
769 struct mgb_softc *sc;
770 if_softc_ctx_t scctx;
771 uint32_t intr_sts, intr_en;
772 int qidx;
773
774 sc = xsc;
775 scctx = iflib_get_softc_ctx(sc->ctx);
776
777 intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
778 intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
779 intr_sts &= intr_en;
780
781 /*
782 * NOTE: Debugging printfs here
783 * will likely cause interrupt test failure.
784 */
785
786 /* TODO: shouldn't continue if suspended */
787 if ((intr_sts & MGB_INTR_STS_ANY) == 0)
788 {
789 device_printf(sc->dev, "non-mgb interrupt triggered.\n");
790 return (FILTER_SCHEDULE_THREAD);
791 }
792 if ((intr_sts & MGB_INTR_STS_TEST) != 0)
793 {
794 sc->isr_test_flag = true;
795 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
796 return (FILTER_HANDLED);
797 }
798 if ((intr_sts & MGB_INTR_STS_RX_ANY) != 0)
799 {
800 for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
801 if ((intr_sts & MGB_INTR_STS_RX(qidx))){
802 iflib_rx_intr_deferred(sc->ctx, qidx);
803 }
804 }
805 return (FILTER_HANDLED);
806 }
807 /* XXX: TX interrupts should not occur */
808 if ((intr_sts & MGB_INTR_STS_TX_ANY) != 0)
809 {
810 for (qidx = 0; qidx < scctx->isc_ntxqsets; qidx++) {
811 if ((intr_sts & MGB_INTR_STS_TX(qidx))) {
812 /* clear the interrupt sts and run handler */
813 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
814 MGB_INTR_STS_TX(qidx));
815 CSR_WRITE_REG(sc, MGB_INTR_STS,
816 MGB_INTR_STS_TX(qidx));
817 iflib_tx_intr_deferred(sc->ctx, qidx);
818 }
819 }
820 return (FILTER_HANDLED);
821 }
822
823 return (FILTER_SCHEDULE_THREAD);
824 }
825
826 static int
827 mgb_msix_intr_assign(if_ctx_t ctx, int msix)
828 {
829 struct mgb_softc *sc;
830 if_softc_ctx_t scctx;
831 int error, i, vectorid;
832 char irq_name[16];
833
834 sc = iflib_get_softc(ctx);
835 scctx = iflib_get_softc_ctx(ctx);
836
837 KASSERT(scctx->isc_nrxqsets == 1 && scctx->isc_ntxqsets == 1,
838 ("num rxqsets/txqsets != 1 "));
839
840 /*
841 * First vector should be admin interrupts, others vectors are TX/RX
842 *
843 * RIDs start at 1, and vector ids start at 0.
844 */
845 vectorid = 0;
846 error = iflib_irq_alloc_generic(ctx, &sc->admin_irq, vectorid + 1,
847 IFLIB_INTR_ADMIN, mgb_admin_intr, sc, 0, "admin");
848 if (error) {
849 device_printf(sc->dev,
850 "Failed to register admin interrupt handler\n");
851 return (error);
852 }
853
854 for (i = 0; i < scctx->isc_nrxqsets; i++) {
855 vectorid++;
856 snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
857 error = iflib_irq_alloc_generic(ctx, &sc->rx_irq, vectorid + 1,
858 IFLIB_INTR_RXTX, mgb_rxq_intr, sc, i, irq_name);
859 if (error) {
860 device_printf(sc->dev,
861 "Failed to register rxq %d interrupt handler\n", i);
862 return (error);
863 }
864 CSR_UPDATE_REG(sc, MGB_INTR_VEC_RX_MAP,
865 MGB_INTR_VEC_MAP(vectorid, i));
866 }
867
868 /* Not actually mapping hw TX interrupts ... */
869 for (i = 0; i < scctx->isc_ntxqsets; i++) {
870 snprintf(irq_name, sizeof(irq_name), "txq%d", i);
871 iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
872 irq_name);
873 }
874
875 return (0);
876 }
877
878 static void
879 mgb_intr_enable_all(if_ctx_t ctx)
880 {
881 struct mgb_softc *sc;
882 if_softc_ctx_t scctx;
883 int i, dmac_enable = 0, intr_sts = 0, vec_en = 0;
884
885 sc = iflib_get_softc(ctx);
886 scctx = iflib_get_softc_ctx(ctx);
887 intr_sts |= MGB_INTR_STS_ANY;
888 vec_en |= MGB_INTR_STS_ANY;
889
890 for (i = 0; i < scctx->isc_nrxqsets; i++) {
891 intr_sts |= MGB_INTR_STS_RX(i);
892 dmac_enable |= MGB_DMAC_RX_INTR_ENBL(i);
893 vec_en |= MGB_INTR_RX_VEC_STS(i);
894 }
895
896 /* TX interrupts aren't needed ... */
897
898 CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, intr_sts);
899 CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, vec_en);
900 CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, dmac_enable);
901 CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, dmac_enable);
902 }
903
904 static void
905 mgb_intr_disable_all(if_ctx_t ctx)
906 {
907 struct mgb_softc *sc;
908
909 sc = iflib_get_softc(ctx);
910 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, UINT32_MAX);
911 CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_CLR, UINT32_MAX);
912 CSR_WRITE_REG(sc, MGB_INTR_STS, UINT32_MAX);
913
914 CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_CLR, UINT32_MAX);
915 CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, UINT32_MAX);
916 }
917
918 static int
919 mgb_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
920 {
921 /* called after successful rx isr */
922 struct mgb_softc *sc;
923
924 sc = iflib_get_softc(ctx);
925 CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_RX_VEC_STS(qid));
926 CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_RX(qid));
927
928 CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_RX_INTR_ENBL(qid));
929 CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_RX_INTR_ENBL(qid));
930 return (0);
931 }
932
933 static int
934 mgb_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
935 {
936 /* XXX: not called (since tx interrupts not used) */
937 struct mgb_softc *sc;
938
939 sc = iflib_get_softc(ctx);
940
941 CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_TX(qid));
942
943 CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_TX_INTR_ENBL(qid));
944 CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_TX_INTR_ENBL(qid));
945 return (0);
946 }
947
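/*
 * Software interrupt self-test: write the test bit to MGB_INTR_STS to clear
 * any stale status, enable the test cause, and fire it through MGB_INTR_SET.
 * mgb_admin_intr() sets sc->isr_test_flag when it sees MGB_INTR_STS_TEST, so
 * polling the flag tells us whether interrupt delivery works.
 */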
948 static bool
949 mgb_intr_test(struct mgb_softc *sc)
950 {
951 int i;
952
953 sc->isr_test_flag = false;
954 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
955 CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_STS_ANY);
956 CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET,
957 MGB_INTR_STS_ANY | MGB_INTR_STS_TEST);
958 CSR_WRITE_REG(sc, MGB_INTR_SET, MGB_INTR_STS_TEST);
959 if (sc->isr_test_flag)
960 return true;
961 for (i = 0; i < MGB_TIMEOUT; i++) {
962 DELAY(10);
963 if (sc->isr_test_flag)
964 break;
965 }
966 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TEST);
967 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
968 return sc->isr_test_flag;
969 }
970
971 static int
972 mgb_isc_txd_encap(void *xsc , if_pkt_info_t ipi)
973 {
974 struct mgb_softc *sc;
975 if_softc_ctx_t scctx;
976 struct mgb_ring_data *rdata;
977 struct mgb_ring_desc *txd;
978 bus_dma_segment_t *segs;
979 qidx_t pidx, nsegs;
980 int i;
981
982 KASSERT(ipi->ipi_qsidx == 0,
983 ("tried to refill TX Channel %d.\n", ipi->ipi_qsidx));
984 sc = xsc;
985 scctx = iflib_get_softc_ctx(sc->ctx);
986 rdata = &sc->tx_ring_data;
987
988 pidx = ipi->ipi_pidx;
989 segs = ipi->ipi_segs;
990 nsegs = ipi->ipi_nsegs;
991
992 /* For each seg, create a descriptor */
993 for (i = 0; i < nsegs; ++i) {
994 KASSERT(nsegs == 1, ("Multisegment packet !!!!!\n"));
995 txd = &rdata->ring[pidx];
996 txd->ctl = htole32(
997 (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK ) |
998 /*
999 * XXX: This will be wrong in the multi-segment case:
1000 * FS should be set only on the first segment and
1001 * LS only on the last segment of a packet.
1002 */
1003 MGB_TX_DESC_CTL_FS | MGB_TX_DESC_CTL_LS |
1004 MGB_DESC_CTL_FCS);
1005 txd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32(
1006 segs[i].ds_addr));
1007 txd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32(
1008 segs[i].ds_addr));
1009 txd->sts = htole32(
1010 (segs[i].ds_len << 16) & MGB_DESC_FRAME_LEN_MASK);
1011 pidx = MGB_NEXT_RING_IDX(pidx);
1012 }
1013 ipi->ipi_new_pidx = pidx;
1014 return (0);
1015 }
1016
1017 static void
1018 mgb_isc_txd_flush(void *xsc, uint16_t txqid, qidx_t pidx)
1019 {
1020 struct mgb_softc *sc;
1021 struct mgb_ring_data *rdata;
1022
1023 KASSERT(txqid == 0, ("tried to flush TX Channel %d.\n", txqid));
1024 sc = xsc;
1025 rdata = &sc->tx_ring_data;
1026
1027 if (rdata->last_tail != pidx) {
1028 rdata->last_tail = pidx;
1029 CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(txqid), rdata->last_tail);
1030 }
1031 }
1032
1033 static int
1034 mgb_isc_txd_credits_update(void *xsc, uint16_t txqid, bool clear)
1035 {
1036 struct mgb_softc *sc;
1037 struct mgb_ring_desc *txd;
1038 struct mgb_ring_data *rdata;
1039 int processed = 0;
1040
1041 /*
1042 * > If clear is true, we need to report the number of TX command ring
1043 * > descriptors that have been processed by the device. If clear is
1044 * > false, we just need to report whether or not at least one TX
1045 * > command ring descriptor has been processed by the device.
1046 * - vmx driver
1047 */
1048 KASSERT(txqid == 0, ("tried to credits_update TX Channel %d.\n",
1049 txqid));
1050 sc = xsc;
1051 rdata = &sc->tx_ring_data;
1052
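	/*
	 * *head_wb is the consumer index the device writes back to host
	 * memory; every descriptor between last_head and *head_wb has been
	 * transmitted and can be reclaimed.
	 */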
1053 while (*(rdata->head_wb) != rdata->last_head) {
1054 if (!clear)
1055 return 1;
1056
1057 txd = &rdata->ring[rdata->last_head];
1058 memset(txd, 0, sizeof(struct mgb_ring_desc));
1059 rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1060 processed++;
1061 }
1062
1063 return (processed);
1064 }
1065
1066 static int
1067 mgb_isc_rxd_available(void *xsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
1068 {
1069 struct mgb_softc *sc;
1070 if_softc_ctx_t scctx;
1071 struct mgb_ring_data *rdata;
1072 int avail = 0;
1073
1074 sc = xsc;
1075 KASSERT(rxqid == 0, ("tried to check availability in RX Channel %d.\n",
1076 rxqid));
1077
1078 rdata = &sc->rx_ring_data;
1079 scctx = iflib_get_softc_ctx(sc->ctx);
1080 for (; idx != *(rdata->head_wb);
1081 idx = MGB_NEXT_RING_IDX(idx)) {
1082 avail++;
1083 /* XXX: Could verify desc is device owned here */
1084 if (avail == budget)
1085 break;
1086 }
1087 return (avail);
1088 }
1089
1090 static int
1091 mgb_isc_rxd_pkt_get(void *xsc, if_rxd_info_t ri)
1092 {
1093 struct mgb_softc *sc;
1094 struct mgb_ring_data *rdata;
1095 struct mgb_ring_desc rxd;
1096 int total_len;
1097
1098 KASSERT(ri->iri_qsidx == 0,
1099 ("tried to check availability in RX Channel %d\n", ri->iri_qsidx));
1100 sc = xsc;
1101 total_len = 0;
1102 rdata = &sc->rx_ring_data;
1103
1104 while (*(rdata->head_wb) != rdata->last_head) {
1105 /* copy ring desc and do swapping */
1106 rxd = rdata->ring[rdata->last_head];
1107 rxd.ctl = le32toh(rxd.ctl);
1108 rxd.addr.low = le32toh(rxd.addr.low);
1109 rxd.addr.high = le32toh(rxd.addr.high);
1110 rxd.sts = le32toh(rxd.sts);
1111
1112 if ((rxd.ctl & MGB_DESC_CTL_OWN) != 0) {
1113 device_printf(sc->dev,
1114 "Tried to read descriptor ... "
1115 "found that it's owned by the driver\n");
1116 return EINVAL;
1117 }
1118 if ((rxd.ctl & MGB_RX_DESC_CTL_FS) == 0) {
1119 device_printf(sc->dev,
1120 "Tried to read descriptor ... "
1121 "found that FS is not set.\n");
1122 device_printf(sc->dev, "Tried to read descriptor ... that it FS is not set.\n");
1123 return EINVAL;
1124 }
1125 /* XXX: Multi-packet support */
1126 if ((rxd.ctl & MGB_RX_DESC_CTL_LS) == 0) {
1127 device_printf(sc->dev,
1128 "Tried to read descriptor ... "
1129 "found that LS is not set. (Multi-buffer packets not yet supported)\n");
1130 return EINVAL;
1131 }
1132 ri->iri_frags[0].irf_flid = 0;
1133 ri->iri_frags[0].irf_idx = rdata->last_head;
1134 ri->iri_frags[0].irf_len = MGB_DESC_GET_FRAME_LEN(&rxd);
1135 total_len += ri->iri_frags[0].irf_len;
1136
1137 rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1138 break;
1139 }
1140 ri->iri_nfrags = 1;
1141 ri->iri_len = total_len;
1142
1143 return (0);
1144 }
1145
1146 static void
1147 mgb_isc_rxd_refill(void *xsc, if_rxd_update_t iru)
1148 {
1149 if_softc_ctx_t scctx;
1150 struct mgb_softc *sc;
1151 struct mgb_ring_data *rdata;
1152 struct mgb_ring_desc *rxd;
1153 uint64_t *paddrs;
1154 qidx_t *idxs;
1155 qidx_t idx;
1156 int count, len;
1157
1158 count = iru->iru_count;
1159 len = iru->iru_buf_size;
1160 idxs = iru->iru_idxs;
1161 paddrs = iru->iru_paddrs;
1162 KASSERT(iru->iru_qsidx == 0,
1163 ("tried to refill RX Channel %d.\n", iru->iru_qsidx));
1164
1165 sc = xsc;
1166 scctx = iflib_get_softc_ctx(sc->ctx);
1167 rdata = &sc->rx_ring_data;
1168
1169 while (count > 0) {
1170 idx = idxs[--count];
1171 rxd = &rdata->ring[idx];
1172
1173 rxd->sts = 0;
1174 rxd->addr.low =
1175 htole32(CSR_TRANSLATE_ADDR_LOW32(paddrs[count]));
1176 rxd->addr.high =
1177 htole32(CSR_TRANSLATE_ADDR_HIGH32(paddrs[count]));
1178 rxd->ctl = htole32(MGB_DESC_CTL_OWN |
1179 (len & MGB_DESC_CTL_BUFLEN_MASK));
1180 }
1181 return;
1182 }
1183
1184 static void
1185 mgb_isc_rxd_flush(void *xsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
1186 {
1187 struct mgb_softc *sc;
1188
1189 sc = xsc;
1190
1191 KASSERT(rxqid == 0, ("tried to flush RX Channel %d.\n", rxqid));
1192 /*
1193 * According to the programming guide, last_tail must be set to
1194 * the last valid RX descriptor, rather than to the one past that.
1195 * Note that this is not true for the TX ring!
1196 */
1197 sc->rx_ring_data.last_tail = MGB_PREV_RING_IDX(pidx);
1198 CSR_WRITE_REG(sc, MGB_DMA_RX_TAIL(rxqid), sc->rx_ring_data.last_tail);
1199 return;
1200 }
1201
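/*
 * Sanity-check the register BAR mapping by reading the chip ID/revision
 * register at offset 0; the device ID in the upper 16 bits must match one
 * of the supported parts.
 */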
1202 static int
1203 mgb_test_bar(struct mgb_softc *sc)
1204 {
1205 uint32_t id_rev, dev_id, rev;
1206
1207 id_rev = CSR_READ_REG(sc, 0);
1208 dev_id = id_rev >> 16;
1209 rev = id_rev & 0xFFFF;
1210 if (dev_id == MGB_LAN7430_DEVICE_ID ||
1211 dev_id == MGB_LAN7431_DEVICE_ID) {
1212 return 0;
1213 } else {
1214 device_printf(sc->dev, "ID check failed.\n");
1215 return ENXIO;
1216 }
1217 }
1218
1219 static int
1220 mgb_alloc_regs(struct mgb_softc *sc)
1221 {
1222 int rid;
1223
1224 rid = PCIR_BAR(MGB_BAR);
1225 pci_enable_busmaster(sc->dev);
1226 sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1227 &rid, RF_ACTIVE);
1228 if (sc->regs == NULL)
1229 return ENXIO;
1230
1231 return (0);
1232 }
1233
1234 static int
1235 mgb_release_regs(struct mgb_softc *sc)
1236 {
1237 int error = 0;
1238
1239 if (sc->regs != NULL)
1240 error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
1241 rman_get_rid(sc->regs), sc->regs);
1242 sc->regs = NULL;
1243 pci_disable_busmaster(sc->dev);
1244 return error;
1245 }
1246
1247 static int
1248 mgb_dma_init(struct mgb_softc *sc)
1249 {
1250 if_softc_ctx_t scctx;
1251 int ch, error = 0;
1252
1253 scctx = iflib_get_softc_ctx(sc->ctx);
1254
1255 for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
1256 if ((error = mgb_dma_rx_ring_init(sc, ch)))
1257 goto fail;
1258
1259 for (ch = 0; ch < scctx->isc_ntxqsets; ch++)
1260 if ((error = mgb_dma_tx_ring_init(sc, ch)))
1261 goto fail;
1262
1263 fail:
1264 return error;
1265 }
1266
1267 static int
1268 mgb_dma_rx_ring_init(struct mgb_softc *sc, int channel)
1269 {
1270 struct mgb_ring_data *rdata;
1271 int ring_config, error = 0;
1272
1273 rdata = &sc->rx_ring_data;
1274 mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_RESET);
1275 KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_RX_START, channel),
1276 ("Trying to init channels when not in init state\n"));
1277
1278 /* write ring address */
1279 if (rdata->ring_bus_addr == 0) {
1280 device_printf(sc->dev, "Invalid ring bus addr.\n");
	error = ENXIO;
1281 goto fail;
1282 }
1283
1284 CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_H(channel),
1285 CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1286 CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_L(channel),
1287 CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1288
1289 /* write head pointer writeback address */
1290 if (rdata->head_wb_bus_addr == 0) {
1291 device_printf(sc->dev, "Invalid head wb bus addr.\n");
	error = ENXIO;
1292 goto fail;
1293 }
1294 CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_H(channel),
1295 CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1296 CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_L(channel),
1297 CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1298
1299 /* Enable head pointer writeback */
1300 CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG0(channel), MGB_DMA_HEAD_WB_ENBL);
1301
1302 ring_config = CSR_READ_REG(sc, MGB_DMA_RX_CONFIG1(channel));
1303 /* ring size */
1304 ring_config &= ~MGB_DMA_RING_LEN_MASK;
1305 ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1306 /* packet padding (PAD_2 is better for IP header alignment ...) */
1307 ring_config &= ~MGB_DMA_RING_PAD_MASK;
1308 ring_config |= (MGB_DMA_RING_PAD_0 & MGB_DMA_RING_PAD_MASK);
1309
1310 CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG1(channel), ring_config);
1311
1312 rdata->last_head = CSR_READ_REG(sc, MGB_DMA_RX_HEAD(channel));
1313
1314 error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_RESET);
1315 if (error != 0) {
1316 device_printf(sc->dev, "Failed to reset RX FCT.\n");
1317 goto fail;
1318 }
1319 error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_ENABLE);
1320 if (error != 0) {
1321 device_printf(sc->dev, "Failed to enable RX FCT.\n");
1322 goto fail;
1323 }
1324 error = mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_START);
1325 if (error != 0)
1326 device_printf(sc->dev, "Failed to start RX DMAC.\n");
1327 fail:
1328 return (error);
1329 }
1330
1331 static int
1332 mgb_dma_tx_ring_init(struct mgb_softc *sc, int channel)
1333 {
1334 struct mgb_ring_data *rdata;
1335 int ring_config, error = 0;
1336
1337 rdata = &sc->tx_ring_data;
1338 if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel, FCT_RESET))) {
1339 device_printf(sc->dev, "Failed to reset TX FCT.\n");
1340 goto fail;
1341 }
1342 if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel,
1343 FCT_ENABLE))) {
1344 device_printf(sc->dev, "Failed to enable TX FCT.\n");
1345 goto fail;
1346 }
1347 if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1348 DMAC_RESET))) {
1349 device_printf(sc->dev, "Failed to reset TX DMAC.\n");
1350 goto fail;
1351 }
1352 KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_TX_START, channel),
1353 ("Trying to init channels in not init state\n"));
1354
1355 /* write ring address */
1356 if (rdata->ring_bus_addr == 0) {
1357 device_printf(sc->dev, "Invalid ring bus addr.\n");
	error = ENXIO;
1358 goto fail;
1359 }
1360 CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_H(channel),
1361 CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1362 CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_L(channel),
1363 CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1364
1365 /* write ring size */
1366 ring_config = CSR_READ_REG(sc, MGB_DMA_TX_CONFIG1(channel));
1367 ring_config &= ~MGB_DMA_RING_LEN_MASK;
1368 ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1369 CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG1(channel), ring_config);
1370
1371 /* Enable interrupt on completion and head pointer writeback */
1372 ring_config = (MGB_DMA_HEAD_WB_LS_ENBL | MGB_DMA_HEAD_WB_ENBL);
1373 CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG0(channel), ring_config);
1374
1375 /* write head pointer writeback address */
1376 if (rdata->head_wb_bus_addr == 0) {
1377 device_printf(sc->dev, "Invalid head wb bus addr.\n");
	error = ENXIO;
1378 goto fail;
1379 }
1380 CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_H(channel),
1381 CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1382 CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_L(channel),
1383 CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1384
1385 rdata->last_head = CSR_READ_REG(sc, MGB_DMA_TX_HEAD(channel));
1386 KASSERT(rdata->last_head == 0, ("MGB_DMA_TX_HEAD was not reset.\n"));
1387 rdata->last_tail = 0;
1388 CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(channel), rdata->last_tail);
1389
1390 if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1391 DMAC_START)))
1392 device_printf(sc->dev, "Failed to start TX DMAC.\n");
1393 fail:
1394 return error;
1395 }
1396
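/*
 * Issue a DMA controller channel command and poll for its completion where
 * the hardware reports it: RESET waits for the reset bit to clear, STOP
 * waits for the stop bit to assert and the start bit to clear, and START
 * first forces a clean STOP so the channel is never started from a
 * STOP_PENDING state.
 */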
1397 static int
1398 mgb_dmac_control(struct mgb_softc *sc, int start, int channel,
1399 enum mgb_dmac_cmd cmd)
1400 {
1401 int error = 0;
1402
1403 switch (cmd) {
1404 case DMAC_RESET:
1405 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1406 MGB_DMAC_CMD_RESET(start, channel));
1407 error = mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0,
1408 MGB_DMAC_CMD_RESET(start, channel));
1409 break;
1410
1411 case DMAC_START:
1412 /*
1413 * NOTE: this simplifies the logic, since it will never
1414 * try to start in STOP_PENDING, but it also increases work.
1415 */
1416 error = mgb_dmac_control(sc, start, channel, DMAC_STOP);
1417 if (error != 0)
1418 return error;
1419 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1420 MGB_DMAC_CMD_START(start, channel));
1421 break;
1422
1423 case DMAC_STOP:
1424 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1425 MGB_DMAC_CMD_STOP(start, channel));
1426 error = mgb_wait_for_bits(sc, MGB_DMAC_CMD,
1427 MGB_DMAC_CMD_STOP(start, channel),
1428 MGB_DMAC_CMD_START(start, channel));
1429 break;
1430 }
1431 return error;
1432 }
1433
1434 static int
1435 mgb_fct_control(struct mgb_softc *sc, int reg, int channel,
1436 enum mgb_fct_cmd cmd)
1437 {
1438
1439 switch (cmd) {
1440 case FCT_RESET:
1441 CSR_WRITE_REG(sc, reg, MGB_FCT_RESET(channel));
1442 return mgb_wait_for_bits(sc, reg, 0, MGB_FCT_RESET(channel));
1443 case FCT_ENABLE:
1444 CSR_WRITE_REG(sc, reg, MGB_FCT_ENBL(channel));
1445 return (0);
1446 case FCT_DISABLE:
1447 CSR_WRITE_REG(sc, reg, MGB_FCT_DSBL(channel));
1448 return mgb_wait_for_bits(sc, reg, 0, MGB_FCT_ENBL(channel));
1449 }
1450 }
1451
1452 static int
1453 mgb_hw_teardown(struct mgb_softc *sc)
1454 {
1455 int err = 0;
1456
1457 /* Stop MAC */
1458 CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1459 CSR_CLEAR_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1460 if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0)))
1461 return (err);
1462 if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0)))
1463 return (err);
1464 return (err);
1465 }
1466
1467 static int
1468 mgb_hw_init(struct mgb_softc *sc)
1469 {
1470 int error = 0;
1471
1472 error = mgb_hw_reset(sc);
1473 if (error != 0)
1474 goto fail;
1475
1476 mgb_mac_init(sc);
1477
1478 error = mgb_phy_reset(sc);
1479 if (error != 0)
1480 goto fail;
1481
1482 error = mgb_dmac_reset(sc);
1483 if (error != 0)
1484 goto fail;
1485
1486 fail:
1487 return error;
1488 }
1489
1490 static int
1491 mgb_hw_reset(struct mgb_softc *sc)
1492 {
1493
1494 CSR_UPDATE_REG(sc, MGB_HW_CFG, MGB_LITE_RESET);
1495 return (mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET));
1496 }
1497
1498 static int
1499 mgb_mac_init(struct mgb_softc *sc)
1500 {
1501
1502 /**
1503 * enable automatic duplex detection and
1504 * automatic speed detection
1505 */
1506 CSR_UPDATE_REG(sc, MGB_MAC_CR, MGB_MAC_ADD_ENBL | MGB_MAC_ASD_ENBL);
1507 CSR_UPDATE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1508 CSR_UPDATE_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1509
1510 return MGB_STS_OK;
1511 }
1512
1513 static int
1514 mgb_phy_reset(struct mgb_softc *sc)
1515 {
1516
1517 CSR_UPDATE_BYTE(sc, MGB_PMT_CTL, MGB_PHY_RESET);
1518 if (mgb_wait_for_bits(sc, MGB_PMT_CTL, 0, MGB_PHY_RESET) ==
1519 MGB_STS_TIMEOUT)
1520 return MGB_STS_TIMEOUT;
1521 return (mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0));
1522 }
1523
1524 static int
1525 mgb_dmac_reset(struct mgb_softc *sc)
1526 {
1527
1528 CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_RESET);
1529 return (mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_RESET));
1530 }
1531
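/*
 * Poll a CSR until every bit in set_bits reads back as 1 and every bit in
 * clear_bits reads back as 0, checking roughly every 100us for up to
 * MGB_TIMEOUT iterations.  Returns MGB_STS_OK on success and
 * MGB_STS_TIMEOUT otherwise.
 */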
1532 static int
1533 mgb_wait_for_bits(struct mgb_softc *sc, int reg, int set_bits, int clear_bits)
1534 {
1535 int i, val;
1536
1537 i = 0;
1538 do {
1539 /*
1540 * XXX: Datasheets states delay should be > 5 microseconds
1541 * for device reset.
1542 */
1543 DELAY(100);
1544 val = CSR_READ_REG(sc, reg);
1545 if ((val & set_bits) == set_bits &&
1546 (val & clear_bits) == 0)
1547 return MGB_STS_OK;
1548 } while (i++ < MGB_TIMEOUT);
1549
1550 return MGB_STS_TIMEOUT;
1551 }
1552
1553 static void
1554 mgb_get_ethaddr(struct mgb_softc *sc, struct ether_addr *dest)
1555 {
1556
1557 CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_L, &dest->octet[0], 4);
1558 CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_H, &dest->octet[4], 2);
1559 }
1560
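/*
 * MII register access is indirect: wait for MGB_MII_ACCESS to go idle,
 * program the PHY address, register address and read/write opcode together
 * with the BUSY bit, then wait for BUSY to clear; the 16-bit value is
 * transferred through MGB_MII_DATA.
 */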
1561 static int
1562 mgb_miibus_readreg(device_t dev, int phy, int reg)
1563 {
1564 struct mgb_softc *sc;
1565 int mii_access;
1566
1567 sc = iflib_get_softc(device_get_softc(dev));
1568
1569 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1570 MGB_STS_TIMEOUT)
1571 return EIO;
1572 mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1573 mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1574 mii_access |= MGB_MII_BUSY | MGB_MII_READ;
1575 CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1576 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1577 MGB_STS_TIMEOUT)
1578 return EIO;
1579 return (CSR_READ_2_BYTES(sc, MGB_MII_DATA));
1580 }
1581
1582 static int
1583 mgb_miibus_writereg(device_t dev, int phy, int reg, int data)
1584 {
1585 struct mgb_softc *sc;
1586 int mii_access;
1587
1588 sc = iflib_get_softc(device_get_softc(dev));
1589
1590 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS,
1591 0, MGB_MII_BUSY) == MGB_STS_TIMEOUT)
1592 return EIO;
1593 mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1594 mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1595 mii_access |= MGB_MII_BUSY | MGB_MII_WRITE;
1596 CSR_WRITE_REG(sc, MGB_MII_DATA, data);
1597 CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1598 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1599 MGB_STS_TIMEOUT)
1600 return EIO;
1601 return 0;
1602 }
1603
1604 /* XXX: May need to lock these up */
1605 static void
1606 mgb_miibus_statchg(device_t dev)
1607 {
1608 struct mgb_softc *sc;
1609 struct mii_data *miid;
1610
1611 sc = iflib_get_softc(device_get_softc(dev));
1612 miid = device_get_softc(sc->miibus);
1613 /* Update baudrate in iflib */
1614 sc->baudrate = ifmedia_baudrate(miid->mii_media_active);
1615 iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1616 }
1617
1618 static void
1619 mgb_miibus_linkchg(device_t dev)
1620 {
1621 struct mgb_softc *sc;
1622 struct mii_data *miid;
1623 int link_state;
1624
1625 sc = iflib_get_softc(device_get_softc(dev));
1626 miid = device_get_softc(sc->miibus);
1627 /* XXX: copied from miibus_linkchg **/
1628 if (miid->mii_media_status & IFM_AVALID) {
1629 if (miid->mii_media_status & IFM_ACTIVE)
1630 link_state = LINK_STATE_UP;
1631 else
1632 link_state = LINK_STATE_DOWN;
1633 } else
1634 link_state = LINK_STATE_UNKNOWN;
1635 sc->link_state = link_state;
1636 iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1637 }