/* sys/dev/mgb/if_mgb.c — Microchip LAN7430/LAN7431 PCIe Gigabit Ethernet driver */
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2019 The FreeBSD Foundation, Inc.
5 *
6 * This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
7 * under sponsorship from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 /*
34 * Microchip LAN7430/LAN7431 PCIe to Gigabit Ethernet Controller driver.
35 *
36 * Product information:
37 * LAN7430 https://www.microchip.com/en-us/product/LAN7430
38 * - Integrated IEEE 802.3 compliant PHY
39 * LAN7431 https://www.microchip.com/en-us/product/LAN7431
40 * - RGMII Interface
41 *
42 * This driver uses the iflib interface and the default 'ukphy' PHY driver.
43 *
44 * UNIMPLEMENTED FEATURES
45 * ----------------------
46 * A number of features supported by LAN743X device are not yet implemented in
47 * this driver:
48 *
49 * - Multiple (up to 4) RX queues support
50 * - Just needs to remove asserts and malloc multiple `rx_ring_data`
51 * structs based on ncpus.
52 * - RX/TX Checksum Offloading support
53 * - VLAN support
54 * - Receive Packet Filtering (Multicast Perfect/Hash Address) support
55 * - Wake on LAN (WoL) support
56 * - TX LSO support
57 * - Receive Side Scaling (RSS) support
58 * - Debugging Capabilities:
59 * - Could include MAC statistics and
60 * error status registers in sysctl.
61 */
62
63 #include <sys/param.h>
64 #include <sys/bus.h>
65 #include <sys/endian.h>
66 #include <sys/kdb.h>
67 #include <sys/kernel.h>
68 #include <sys/module.h>
69 #include <sys/rman.h>
70 #include <sys/socket.h>
71 #include <sys/sockio.h>
72 #include <machine/bus.h>
73 #include <machine/resource.h>
74
75 #include <net/ethernet.h>
76 #include <net/if.h>
77 #include <net/if_var.h>
78 #include <net/if_types.h>
79 #include <net/if_media.h>
80 #include <net/iflib.h>
81
82 #include <dev/mgb/if_mgb.h>
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85 #include <dev/pci/pcireg.h>
86 #include <dev/pci/pcivar.h>
87
88 #include "ifdi_if.h"
89 #include "miibus_if.h"
90
/*
 * PCI vendor/device IDs this driver attaches to.  iflib matches probed
 * devices against this table (see also IFLIB_PNP_INFO below).
 */
static pci_vendor_info_t mgb_vendor_info_array[] = {
	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7430_DEVICE_ID,
	    "Microchip LAN7430 PCIe Gigabit Ethernet Controller"),
	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7431_DEVICE_ID,
	    "Microchip LAN7431 PCIe Gigabit Ethernet Controller"),
	PVID_END
};
98
99 /* Device methods */
100 static device_register_t mgb_register;
101
102 /* IFLIB methods */
103 static ifdi_attach_pre_t mgb_attach_pre;
104 static ifdi_attach_post_t mgb_attach_post;
105 static ifdi_detach_t mgb_detach;
106
107 static ifdi_tx_queues_alloc_t mgb_tx_queues_alloc;
108 static ifdi_rx_queues_alloc_t mgb_rx_queues_alloc;
109 static ifdi_queues_free_t mgb_queues_free;
110
111 static ifdi_init_t mgb_init;
112 static ifdi_stop_t mgb_stop;
113
114 static ifdi_msix_intr_assign_t mgb_msix_intr_assign;
115 static ifdi_tx_queue_intr_enable_t mgb_tx_queue_intr_enable;
116 static ifdi_rx_queue_intr_enable_t mgb_rx_queue_intr_enable;
117 static ifdi_intr_enable_t mgb_intr_enable_all;
118 static ifdi_intr_disable_t mgb_intr_disable_all;
119
120 /* IFLIB_TXRX methods */
121 static int mgb_isc_txd_encap(void *,
122 if_pkt_info_t);
123 static void mgb_isc_txd_flush(void *,
124 uint16_t, qidx_t);
125 static int mgb_isc_txd_credits_update(void *,
126 uint16_t, bool);
127 static int mgb_isc_rxd_available(void *,
128 uint16_t, qidx_t, qidx_t);
129 static int mgb_isc_rxd_pkt_get(void *,
130 if_rxd_info_t);
131 static void mgb_isc_rxd_refill(void *,
132 if_rxd_update_t);
133 static void mgb_isc_rxd_flush(void *,
134 uint16_t, uint8_t, qidx_t);
135
136 /* Interrupts */
137 static driver_filter_t mgb_legacy_intr;
138 static driver_filter_t mgb_admin_intr;
139 static driver_filter_t mgb_rxq_intr;
140 static bool mgb_intr_test(struct mgb_softc *);
141
142 /* MII methods */
143 static miibus_readreg_t mgb_miibus_readreg;
144 static miibus_writereg_t mgb_miibus_writereg;
145 static miibus_linkchg_t mgb_miibus_linkchg;
146 static miibus_statchg_t mgb_miibus_statchg;
147
148 static int mgb_media_change(if_t);
149 static void mgb_media_status(if_t,
150 struct ifmediareq *);
151
152 /* Helper/Test functions */
153 static int mgb_test_bar(struct mgb_softc *);
154 static int mgb_alloc_regs(struct mgb_softc *);
155 static int mgb_release_regs(struct mgb_softc *);
156
157 static void mgb_get_ethaddr(struct mgb_softc *,
158 struct ether_addr *);
159
160 static int mgb_wait_for_bits(struct mgb_softc *,
161 int, int, int);
162
163 /* H/W init, reset and teardown helpers */
164 static int mgb_hw_init(struct mgb_softc *);
165 static int mgb_hw_teardown(struct mgb_softc *);
166 static int mgb_hw_reset(struct mgb_softc *);
167 static int mgb_mac_init(struct mgb_softc *);
168 static int mgb_dmac_reset(struct mgb_softc *);
169 static int mgb_phy_reset(struct mgb_softc *);
170
171 static int mgb_dma_init(struct mgb_softc *);
172 static int mgb_dma_tx_ring_init(struct mgb_softc *,
173 int);
174 static int mgb_dma_rx_ring_init(struct mgb_softc *,
175 int);
176
177 static int mgb_dmac_control(struct mgb_softc *,
178 int, int, enum mgb_dmac_cmd);
179 static int mgb_fct_control(struct mgb_softc *,
180 int, int, enum mgb_fct_cmd);
181
182 /*********************************************************************
183 * FreeBSD Device Interface Entry Points
184 *********************************************************************/
185
/*
 * Newbus device methods.  probe/attach/detach/etc. are delegated to iflib;
 * the miibus_* methods let the PHY layer access the MAC's MII registers.
 */
static device_method_t mgb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, mgb_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),

	/* MII Interface */
	DEVMETHOD(miibus_readreg, mgb_miibus_readreg),
	DEVMETHOD(miibus_writereg, mgb_miibus_writereg),
	DEVMETHOD(miibus_linkchg, mgb_miibus_linkchg),
	DEVMETHOD(miibus_statchg, mgb_miibus_statchg),

	DEVMETHOD_END
};

static driver_t mgb_driver = {
	"mgb", mgb_methods, sizeof(struct mgb_softc)
};

static devclass_t mgb_devclass;
/* Attach "mgb" instances to the PCI bus and publish PNP match data. */
DRIVER_MODULE(mgb, pci, mgb_driver, mgb_devclass, NULL, NULL);
IFLIB_PNP_INFO(pci, mgb, mgb_vendor_info_array);
MODULE_VERSION(mgb, 1);

#if 0 /* MIIBUS_DEBUG */
/* If MIIBUS debug stuff is in attach then order matters. Use below instead. */
DRIVER_MODULE_ORDERED(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL,
    SI_ORDER_ANY);
#endif /* MIIBUS_DEBUG */
/* miibus hangs off mgb so the PHY driver (ukphy) can attach. */
DRIVER_MODULE(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL);

MODULE_DEPEND(mgb, pci, 1, 1, 1);
MODULE_DEPEND(mgb, ether, 1, 1, 1);
MODULE_DEPEND(mgb, miibus, 1, 1, 1);
MODULE_DEPEND(mgb, iflib, 1, 1, 1);
225
/*
 * iflib device-interface (IFDI) method table mapping IFDI entry points onto
 * this driver's implementations.  Optional methods not yet implemented are
 * kept under "#if 0" as a reminder of missing features.
 */
static device_method_t mgb_iflib_methods[] = {
	DEVMETHOD(ifdi_attach_pre, mgb_attach_pre),
	DEVMETHOD(ifdi_attach_post, mgb_attach_post),
	DEVMETHOD(ifdi_detach, mgb_detach),

	DEVMETHOD(ifdi_init, mgb_init),
	DEVMETHOD(ifdi_stop, mgb_stop),

	DEVMETHOD(ifdi_tx_queues_alloc, mgb_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, mgb_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, mgb_queues_free),

	DEVMETHOD(ifdi_msix_intr_assign, mgb_msix_intr_assign),
	DEVMETHOD(ifdi_tx_queue_intr_enable, mgb_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, mgb_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable, mgb_intr_enable_all),
	DEVMETHOD(ifdi_intr_disable, mgb_intr_disable_all),

#if 0 /* Not yet implemented IFLIB methods */
	/*
	 * Set multicast addresses, mtu and promiscuous mode
	 */
	DEVMETHOD(ifdi_multi_set, mgb_multi_set),
	DEVMETHOD(ifdi_mtu_set, mgb_mtu_set),
	DEVMETHOD(ifdi_promisc_set, mgb_promisc_set),

	/*
	 * Needed for VLAN support
	 */
	DEVMETHOD(ifdi_vlan_register, mgb_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, mgb_vlan_unregister),

	/*
	 * Needed for WOL support
	 * at the very least.
	 */
	DEVMETHOD(ifdi_shutdown, mgb_shutdown),
	DEVMETHOD(ifdi_suspend, mgb_suspend),
	DEVMETHOD(ifdi_resume, mgb_resume),
#endif /* UNUSED_IFLIB_METHODS */
	DEVMETHOD_END
};

static driver_t mgb_iflib_driver = {
	"mgb", mgb_iflib_methods, sizeof(struct mgb_softc)
};
272
/*
 * iflib fast-path TX/RX entry points.  These run in iflib's transmit/receive
 * task context; ift_legacy_intr is used only when MSI-X assignment fails.
 */
static struct if_txrx mgb_txrx = {
	.ift_txd_encap = mgb_isc_txd_encap,
	.ift_txd_flush = mgb_isc_txd_flush,
	.ift_txd_credits_update = mgb_isc_txd_credits_update,
	.ift_rxd_available = mgb_isc_rxd_available,
	.ift_rxd_pkt_get = mgb_isc_rxd_pkt_get,
	.ift_rxd_refill = mgb_isc_rxd_refill,
	.ift_rxd_flush = mgb_isc_rxd_flush,

	.ift_legacy_intr = mgb_legacy_intr
};
284
/*
 * Shared context template handed to iflib from mgb_register().  Describes
 * queue geometry (two queues per set: descriptor ring + head writeback),
 * DMA size/alignment constraints and the PNP match table.
 */
static struct if_shared_ctx mgb_sctx_init = {
	.isc_magic = IFLIB_MAGIC,

	.isc_q_align = PAGE_SIZE,
	.isc_admin_intrcnt = 1,
	.isc_flags = IFLIB_DRIVER_MEDIA /* | IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ*/,

	.isc_vendor_info = mgb_vendor_info_array,
	.isc_driver_version = "1",
	.isc_driver = &mgb_iflib_driver,
	/* 2 queues per set for TX and RX (ring queue, head writeback queue) */
	.isc_ntxqs = 2,

	.isc_tx_maxsize = MGB_DMA_MAXSEGS * MCLBYTES,
	/* .isc_tx_nsegments = MGB_DMA_MAXSEGS, */
	.isc_tx_maxsegsize = MCLBYTES,

	.isc_ntxd_min = {1, 1}, /* Will want to make this bigger */
	.isc_ntxd_max = {MGB_DMA_RING_SIZE, 1},
	.isc_ntxd_default = {MGB_DMA_RING_SIZE, 1},

	.isc_nrxqs = 2,

	.isc_rx_maxsize = MCLBYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MCLBYTES,

	.isc_nrxd_min = {1, 1}, /* Will want to make this bigger */
	.isc_nrxd_max = {MGB_DMA_RING_SIZE, 1},
	.isc_nrxd_default = {MGB_DMA_RING_SIZE, 1},

	.isc_nfl = 1, /*one free list since there is only one queue */
#if 0 /* UNUSED_CTX */

	.isc_tso_maxsize = MGB_TSO_MAXSIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = MGB_TX_MAXSEGSIZE,
#endif /* UNUSED_CTX */
};
323
324 /*********************************************************************/
325
/*
 * device_register: hand iflib the shared-context template so it can drive
 * probe/attach for this device.
 */
static void *
mgb_register(device_t dev)
{

	return (&mgb_sctx_init);
}
332
/*
 * ifdi_attach_pre: first-stage attach, called before the ifnet is created.
 *
 * Fills in the iflib softc context (ring/queue sizes, checksum capabilities),
 * maps the register BAR, resets and initializes the hardware, attaches the
 * PHY through miibus, maps the MSI-X PBA BAR when it differs from the MSI-X
 * table BAR, programs the station address, and routes all interrupt vectors
 * to vector 0.  On failure, everything acquired so far is torn down via
 * mgb_detach().  Returns 0 on success or an errno.
 */
static int
mgb_attach_pre(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int error, phyaddr, rid;
	struct ether_addr hwaddr;
	struct mii_data *miid;

	sc = iflib_get_softc(ctx);
	sc->ctx = ctx;
	sc->dev = iflib_get_dev(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	/* IFLIB required setup */
	scctx->isc_txrx = &mgb_txrx;
	scctx->isc_tx_nsegments = MGB_DMA_MAXSEGS;
	/* Ring desc queues */
	scctx->isc_txqsizes[0] = sizeof(struct mgb_ring_desc) *
	    scctx->isc_ntxd[0];
	scctx->isc_rxqsizes[0] = sizeof(struct mgb_ring_desc) *
	    scctx->isc_nrxd[0];

	/* Head WB queues */
	scctx->isc_txqsizes[1] = sizeof(uint32_t) * scctx->isc_ntxd[1];
	scctx->isc_rxqsizes[1] = sizeof(uint32_t) * scctx->isc_nrxd[1];

	/* XXX: Must have 1 txqset, but can have up to 4 rxqsets */
	scctx->isc_nrxqsets = 1;
	scctx->isc_ntxqsets = 1;

	/* scctx->isc_tx_csum_flags = (CSUM_TCP | CSUM_UDP) |
	    (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TSO */
	scctx->isc_tx_csum_flags = 0;
	scctx->isc_capabilities = scctx->isc_capenable = 0;
#if 0
	/*
	 * CSUM, TSO and VLAN support are TBD
	 */
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
	    IFCAP_TSO4 | IFCAP_TSO6 |
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO |
	    IFCAP_JUMBO_MTU;
	scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
#endif

	/* get the BAR */
	error = mgb_alloc_regs(sc);
	if (error != 0) {
		device_printf(sc->dev,
		    "Unable to allocate bus resource: registers.\n");
		goto fail;
	}

	error = mgb_test_bar(sc);
	if (error != 0)
		goto fail;

	error = mgb_hw_init(sc);
	if (error != 0) {
		device_printf(sc->dev,
		    "MGB device init failed. (err: %d)\n", error);
		goto fail;
	}

	/* LAN7430 has a fixed internal PHY at address 1; LAN7431 uses RGMII
	 * with an external PHY, so probe all addresses. */
	switch (pci_get_device(sc->dev)) {
	case MGB_LAN7430_DEVICE_ID:
		phyaddr = 1;
		break;
	case MGB_LAN7431_DEVICE_ID:
	default:
		phyaddr = MII_PHY_ANY;
		break;
	}

	/* XXX: Would be nice(r) if locked methods were here */
	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(ctx),
	    mgb_media_change, mgb_media_status,
	    BMSR_DEFCAPMASK, phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(sc->dev, "Failed to attach MII interface\n");
		goto fail;
	}

	miid = device_get_softc(sc->miibus);
	scctx->isc_media = &miid->mii_media;

	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
	/** Setup PBA BAR **/
	rid = pci_msix_pba_bar(sc->dev);
	if (rid != scctx->isc_msix_bar) {
		sc->pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->pba == NULL) {
			error = ENXIO;
			device_printf(sc->dev, "Failed to setup PBA BAR\n");
			goto fail;
		}
	}

	/* Use a randomly generated MAC if the EEPROM one is unusable. */
	mgb_get_ethaddr(sc, &hwaddr);
	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
	    ETHER_IS_MULTICAST(hwaddr.octet) ||
	    ETHER_IS_ZERO(hwaddr.octet))
		ether_gen_addr(iflib_get_ifp(ctx), &hwaddr);

	/*
	 * XXX: if the MAC address was generated the linux driver
	 * writes it back to the device.
	 */
	iflib_set_mac(ctx, hwaddr.octet);

	/* Map all vectors to vector 0 (admin interrupts) by default. */
	CSR_WRITE_REG(sc, MGB_INTR_VEC_RX_MAP, 0);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_TX_MAP, 0);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_OTHER_MAP, 0);

	return (0);

fail:
	mgb_detach(ctx);
	return (error);
}
458
459 static int
460 mgb_attach_post(if_ctx_t ctx)
461 {
462 struct mgb_softc *sc;
463
464 sc = iflib_get_softc(ctx);
465
466 device_printf(sc->dev, "Interrupt test: %s\n",
467 (mgb_intr_test(sc) ? "PASS" : "FAIL"));
468
469 return (0);
470 }
471
472 static int
473 mgb_detach(if_ctx_t ctx)
474 {
475 struct mgb_softc *sc;
476 int error;
477
478 sc = iflib_get_softc(ctx);
479
480 /* XXX: Should report errors but still detach everything. */
481 error = mgb_hw_teardown(sc);
482
483 /* Release IRQs */
484 iflib_irq_free(ctx, &sc->rx_irq);
485 iflib_irq_free(ctx, &sc->admin_irq);
486
487 if (sc->miibus != NULL)
488 device_delete_child(sc->dev, sc->miibus);
489
490 if (sc->pba != NULL)
491 error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
492 rman_get_rid(sc->pba), sc->pba);
493 sc->pba = NULL;
494
495 error = mgb_release_regs(sc);
496
497 return (error);
498 }
499
500 static int
501 mgb_media_change(if_t ifp)
502 {
503 struct mii_data *miid;
504 struct mii_softc *miisc;
505 struct mgb_softc *sc;
506 if_ctx_t ctx;
507 int needs_reset;
508
509 ctx = if_getsoftc(ifp);
510 sc = iflib_get_softc(ctx);
511 miid = device_get_softc(sc->miibus);
512 LIST_FOREACH(miisc, &miid->mii_phys, mii_list)
513 PHY_RESET(miisc);
514
515 needs_reset = mii_mediachg(miid);
516 if (needs_reset != 0)
517 ifp->if_init(ctx);
518 return (needs_reset);
519 }
520
521 static void
522 mgb_media_status(if_t ifp, struct ifmediareq *ifmr)
523 {
524 struct mgb_softc *sc;
525 struct mii_data *miid;
526
527 sc = iflib_get_softc(if_getsoftc(ifp));
528 miid = device_get_softc(sc->miibus);
529 if ((if_getflags(ifp) & IFF_UP) == 0)
530 return;
531
532 mii_pollstat(miid);
533 ifmr->ifm_active = miid->mii_media_active;
534 ifmr->ifm_status = miid->mii_media_status;
535 }
536
537 static int
538 mgb_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs,
539 int ntxqsets)
540 {
541 struct mgb_softc *sc;
542 struct mgb_ring_data *rdata;
543 int q;
544
545 sc = iflib_get_softc(ctx);
546 KASSERT(ntxqsets == 1, ("ntxqsets = %d", ntxqsets));
547 rdata = &sc->tx_ring_data;
548 for (q = 0; q < ntxqsets; q++) {
549 KASSERT(ntxqs == 2, ("ntxqs = %d", ntxqs));
550 /* Ring */
551 rdata->ring = (struct mgb_ring_desc *) vaddrs[q * ntxqs + 0];
552 rdata->ring_bus_addr = paddrs[q * ntxqs + 0];
553
554 /* Head WB */
555 rdata->head_wb = (uint32_t *) vaddrs[q * ntxqs + 1];
556 rdata->head_wb_bus_addr = paddrs[q * ntxqs + 1];
557 }
558 return (0);
559 }
560
561 static int
562 mgb_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs,
563 int nrxqsets)
564 {
565 struct mgb_softc *sc;
566 struct mgb_ring_data *rdata;
567 int q;
568
569 sc = iflib_get_softc(ctx);
570 KASSERT(nrxqsets == 1, ("nrxqsets = %d", nrxqsets));
571 rdata = &sc->rx_ring_data;
572 for (q = 0; q < nrxqsets; q++) {
573 KASSERT(nrxqs == 2, ("nrxqs = %d", nrxqs));
574 /* Ring */
575 rdata->ring = (struct mgb_ring_desc *) vaddrs[q * nrxqs + 0];
576 rdata->ring_bus_addr = paddrs[q * nrxqs + 0];
577
578 /* Head WB */
579 rdata->head_wb = (uint32_t *) vaddrs[q * nrxqs + 1];
580 rdata->head_wb_bus_addr = paddrs[q * nrxqs + 1];
581 }
582 return (0);
583 }
584
585 static void
586 mgb_queues_free(if_ctx_t ctx)
587 {
588 struct mgb_softc *sc;
589
590 sc = iflib_get_softc(ctx);
591
592 memset(&sc->rx_ring_data, 0, sizeof(struct mgb_ring_data));
593 memset(&sc->tx_ring_data, 0, sizeof(struct mgb_ring_data));
594 }
595
/*
 * ifdi_init: bring the interface up.  (Re)initializes the DMA rings, opens
 * the receive filter for broadcast/multicast/unicast frames, and kicks off
 * media negotiation through the PHY.
 */
static void
mgb_init(if_ctx_t ctx)
{
	struct mgb_softc *sc;
	struct mii_data *miid;
	int error;

	sc = iflib_get_softc(ctx);
	miid = device_get_softc(sc->miibus);
	device_printf(sc->dev, "running init ...\n");

	mgb_dma_init(sc);

	/* XXX: Turn off perfect filtering, turn on (broad|multi|uni)cast rx */
	CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PERFECT_FILTER);
	CSR_UPDATE_REG(sc, MGB_RFE_CTL,
	    MGB_RFE_ALLOW_BROADCAST |
	    MGB_RFE_ALLOW_MULTICAST |
	    MGB_RFE_ALLOW_UNICAST);

	error = mii_mediachg(miid);
	/* Not much we can do if this fails. */
	if (error)
		device_printf(sc->dev, "%s: mii_mediachg returned %d", __func__,
		    error);
}
622
623 #ifdef DEBUG
/*
 * Debug-only helper (compiled under #ifdef DEBUG): dumps non-zero MAC
 * statistics registers (0x1200-0x12FC), a hand-picked list of status
 * registers, the TX descriptor ring contents, and the first 128 words of
 * TX RAM via the data-port (DP_*) registers.
 *
 * NOTE(review): stat_names[] and stats[] are parallel arrays; 0x780 appears
 * twice in stats[] (indices 10 and 15) while stat_names[] lists INT_STS and
 * INT_VEC_MAP2 there — the second 0x780 looks like a stale offset; confirm
 * against the LAN743x register map.
 */
static void
mgb_dump_some_stats(struct mgb_softc *sc)
{
	int i;
	int first_stat = 0x1200;
	int last_stat = 0x12FC;

	/* Print only counters that have accumulated something. */
	for (i = first_stat; i <= last_stat; i += 4)
		if (CSR_READ_REG(sc, i) != 0)
			device_printf(sc->dev, "0x%04x: 0x%08x\n", i,
			    CSR_READ_REG(sc, i));
	char *stat_names[] = {
		"MAC_ERR_STS ",
		"FCT_INT_STS ",
		"DMAC_CFG ",
		"DMAC_CMD ",
		"DMAC_INT_STS ",
		"DMAC_INT_EN ",
		"DMAC_RX_ERR_STS0 ",
		"DMAC_RX_ERR_STS1 ",
		"DMAC_RX_ERR_STS2 ",
		"DMAC_RX_ERR_STS3 ",
		"INT_STS ",
		"INT_EN ",
		"INT_VEC_EN ",
		"INT_VEC_MAP0 ",
		"INT_VEC_MAP1 ",
		"INT_VEC_MAP2 ",
		"TX_HEAD0",
		"TX_TAIL0",
		"DMAC_TX_ERR_STS0 ",
		NULL
	};
	/* Register offsets matching stat_names[]; 0x0 terminates the list. */
	int stats[] = {
		0x114,
		0xA0,
		0xC00,
		0xC0C,
		0xC10,
		0xC14,
		0xC60,
		0xCA0,
		0xCE0,
		0xD20,
		0x780,
		0x788,
		0x794,
		0x7A0,
		0x7A4,
		0x780,
		0xD58,
		0xD5C,
		0xD60,
		0x0
	};
	i = 0;
	printf("==============================\n");
	while (stats[i++])
		device_printf(sc->dev, "%s at offset 0x%04x = 0x%08x\n",
		    stat_names[i - 1], stats[i - 1],
		    CSR_READ_REG(sc, stats[i - 1]));
	printf("==== TX RING DESCS ====\n");
	for (i = 0; i < MGB_DMA_RING_SIZE; i++)
		device_printf(sc->dev, "ring[%d].data0=0x%08x\n"
		    "ring[%d].data1=0x%08x\n"
		    "ring[%d].data2=0x%08x\n"
		    "ring[%d].data3=0x%08x\n",
		    i, sc->tx_ring_data.ring[i].ctl,
		    i, sc->tx_ring_data.ring[i].addr.low,
		    i, sc->tx_ring_data.ring[i].addr.high,
		    i, sc->tx_ring_data.ring[i].sts);
	device_printf(sc->dev, "==== DUMP_TX_DMA_RAM ====\n");
	CSR_WRITE_REG(sc, 0x24, 0xF); // DP_SEL & TX_RAM_0
	for (i = 0; i < 128; i++) {
		CSR_WRITE_REG(sc, 0x2C, i); // DP_ADDR

		CSR_WRITE_REG(sc, 0x28, 0); // DP_CMD

		/* Poll the data-port READY bit before reading DP_DATA. */
		while ((CSR_READ_REG(sc, 0x24) & 0x80000000) == 0) // DP_SEL & READY
			DELAY(1000);

		device_printf(sc->dev, "DMAC_TX_RAM_0[%u]=%08x\n", i,
		    CSR_READ_REG(sc, 0x30)); // DP_DATA
	}
}
709 #endif
710
711 static void
712 mgb_stop(if_ctx_t ctx)
713 {
714 struct mgb_softc *sc ;
715 if_softc_ctx_t scctx;
716 int i;
717
718 sc = iflib_get_softc(ctx);
719 scctx = iflib_get_softc_ctx(ctx);
720
721 /* XXX: Could potentially timeout */
722 for (i = 0; i < scctx->isc_nrxqsets; i++) {
723 mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_STOP);
724 mgb_fct_control(sc, MGB_FCT_RX_CTL, 0, FCT_DISABLE);
725 }
726 for (i = 0; i < scctx->isc_ntxqsets; i++) {
727 mgb_dmac_control(sc, MGB_DMAC_TX_START, 0, DMAC_STOP);
728 mgb_fct_control(sc, MGB_FCT_TX_CTL, 0, FCT_DISABLE);
729 }
730 }
731
732 static int
733 mgb_legacy_intr(void *xsc)
734 {
735 struct mgb_softc *sc;
736
737 sc = xsc;
738 iflib_admin_intr_deferred(sc->ctx);
739 return (FILTER_HANDLED);
740 }
741
742 static int
743 mgb_rxq_intr(void *xsc)
744 {
745 struct mgb_softc *sc;
746 if_softc_ctx_t scctx;
747 uint32_t intr_sts, intr_en;
748 int qidx;
749
750 sc = xsc;
751 scctx = iflib_get_softc_ctx(sc->ctx);
752
753 intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
754 intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
755 intr_sts &= intr_en;
756
757 for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
758 if ((intr_sts & MGB_INTR_STS_RX(qidx))){
759 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
760 MGB_INTR_STS_RX(qidx));
761 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_RX(qidx));
762 }
763 }
764 return (FILTER_SCHEDULE_THREAD);
765 }
766
767 static int
768 mgb_admin_intr(void *xsc)
769 {
770 struct mgb_softc *sc;
771 if_softc_ctx_t scctx;
772 uint32_t intr_sts, intr_en;
773 int qidx;
774
775 sc = xsc;
776 scctx = iflib_get_softc_ctx(sc->ctx);
777
778 intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
779 intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
780 intr_sts &= intr_en;
781
782 /* TODO: shouldn't continue if suspended */
783 if ((intr_sts & MGB_INTR_STS_ANY) == 0)
784 return (FILTER_STRAY);
785 if ((intr_sts & MGB_INTR_STS_TEST) != 0) {
786 sc->isr_test_flag = true;
787 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
788 return (FILTER_HANDLED);
789 }
790 if ((intr_sts & MGB_INTR_STS_RX_ANY) != 0) {
791 for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
792 if ((intr_sts & MGB_INTR_STS_RX(qidx))){
793 iflib_rx_intr_deferred(sc->ctx, qidx);
794 }
795 }
796 return (FILTER_HANDLED);
797 }
798 /* XXX: TX interrupts should not occur */
799 if ((intr_sts & MGB_INTR_STS_TX_ANY) != 0) {
800 for (qidx = 0; qidx < scctx->isc_ntxqsets; qidx++) {
801 if ((intr_sts & MGB_INTR_STS_RX(qidx))) {
802 /* clear the interrupt sts and run handler */
803 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
804 MGB_INTR_STS_TX(qidx));
805 CSR_WRITE_REG(sc, MGB_INTR_STS,
806 MGB_INTR_STS_TX(qidx));
807 iflib_tx_intr_deferred(sc->ctx, qidx);
808 }
809 }
810 return (FILTER_HANDLED);
811 }
812
813 return (FILTER_SCHEDULE_THREAD);
814 }
815
/*
 * ifdi_msix_intr_assign: allocate MSI-X interrupt handlers.
 *
 * Vector 0 carries admin interrupts; each RX queue gets its own vector and
 * is mapped to it in MGB_INTR_VEC_RX_MAP.  TX completions are detected via
 * head writeback, so TX queues only get iflib soft interrupts, not hardware
 * vectors.  Returns 0 on success or the iflib allocation error.
 *
 * NOTE(review): all RX queues share the single sc->rx_irq slot; this only
 * works because exactly one RX queue set is supported (asserted below).
 */
static int
mgb_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int error, i, vectorid;
	char irq_name[16];

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	KASSERT(scctx->isc_nrxqsets == 1 && scctx->isc_ntxqsets == 1,
	    ("num rxqsets/txqsets != 1 "));

	/*
	 * First vector should be admin interrupts, others vectors are TX/RX
	 *
	 * RIDs start at 1, and vector ids start at 0.
	 */
	vectorid = 0;
	error = iflib_irq_alloc_generic(ctx, &sc->admin_irq, vectorid + 1,
	    IFLIB_INTR_ADMIN, mgb_admin_intr, sc, 0, "admin");
	if (error) {
		device_printf(sc->dev,
		    "Failed to register admin interrupt handler\n");
		return (error);
	}

	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		vectorid++;
		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &sc->rx_irq, vectorid + 1,
		    IFLIB_INTR_RXTX, mgb_rxq_intr, sc, i, irq_name);
		if (error) {
			device_printf(sc->dev,
			    "Failed to register rxq %d interrupt handler\n", i);
			return (error);
		}
		/* Steer this RX queue's interrupt to its own vector. */
		CSR_UPDATE_REG(sc, MGB_INTR_VEC_RX_MAP,
		    MGB_INTR_VEC_MAP(vectorid, i));
	}

	/* Not actually mapping hw TX interrupts ... */
	for (i = 0; i < scctx->isc_ntxqsets; i++) {
		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
		    irq_name);
	}

	return (0);
}
867
868 static void
869 mgb_intr_enable_all(if_ctx_t ctx)
870 {
871 struct mgb_softc *sc;
872 if_softc_ctx_t scctx;
873 int i, dmac_enable = 0, intr_sts = 0, vec_en = 0;
874
875 sc = iflib_get_softc(ctx);
876 scctx = iflib_get_softc_ctx(ctx);
877 intr_sts |= MGB_INTR_STS_ANY;
878 vec_en |= MGB_INTR_STS_ANY;
879
880 for (i = 0; i < scctx->isc_nrxqsets; i++) {
881 intr_sts |= MGB_INTR_STS_RX(i);
882 dmac_enable |= MGB_DMAC_RX_INTR_ENBL(i);
883 vec_en |= MGB_INTR_RX_VEC_STS(i);
884 }
885
886 /* TX interrupts aren't needed ... */
887
888 CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, intr_sts);
889 CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, vec_en);
890 CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, dmac_enable);
891 CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, dmac_enable);
892 }
893
894 static void
895 mgb_intr_disable_all(if_ctx_t ctx)
896 {
897 struct mgb_softc *sc;
898
899 sc = iflib_get_softc(ctx);
900 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, UINT32_MAX);
901 CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_CLR, UINT32_MAX);
902 CSR_WRITE_REG(sc, MGB_INTR_STS, UINT32_MAX);
903
904 CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_CLR, UINT32_MAX);
905 CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, UINT32_MAX);
906 }
907
/*
 * ifdi_rx_queue_intr_enable: re-arm queue `qid's RX interrupt after the ISR
 * masked it (see mgb_rxq_intr), at the vector, top-level and DMAC layers.
 * The write to MGB_DMAC_INTR_STS presumably clears stale status (write-1-
 * to-clear) before re-enabling — confirm against the LAN743x datasheet.
 */
static int
mgb_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	/* called after successful rx isr */
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_RX_VEC_STS(qid));
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_RX(qid));

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_RX_INTR_ENBL(qid));
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_RX_INTR_ENBL(qid));
	return (0);
}
922
/*
 * ifdi_tx_queue_intr_enable: enable queue `qid's TX interrupt at the
 * top-level controller and the DMAC.  Unused in practice because the driver
 * relies on head writeback instead of TX interrupts (see the XXX below).
 */
static int
mgb_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	/* XXX: not called (since tx interrupts not used) */
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_TX(qid));

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_TX_INTR_ENBL(qid));
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_TX_INTR_ENBL(qid));
	return (0);
}
937
/*
 * Fire the device's software test interrupt and wait (polling up to
 * MGB_TIMEOUT times at 10us intervals) for mgb_admin_intr() to observe it
 * and set sc->isr_test_flag.  Returns true iff the interrupt was delivered.
 */
static bool
mgb_intr_test(struct mgb_softc *sc)
{
	int i;

	sc->isr_test_flag = false;
	/* Ack any stale test status, then enable the test source. */
	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_STS_ANY);
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET,
	    MGB_INTR_STS_ANY | MGB_INTR_STS_TEST);
	/* Request the device raise the test interrupt. */
	CSR_WRITE_REG(sc, MGB_INTR_SET, MGB_INTR_STS_TEST);
	if (sc->isr_test_flag)
		return (true);
	for (i = 0; i < MGB_TIMEOUT; i++) {
		DELAY(10);
		if (sc->isr_test_flag)
			break;
	}
	/* Disable and ack the test source again. */
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TEST);
	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
	return (sc->isr_test_flag);
}
960
/*
 * ift_txd_encap: write one little-endian TX DMA descriptor per packet
 * segment starting at ipi->ipi_pidx, then advance ipi->ipi_new_pidx past
 * the last descriptor written.  Only single-segment packets are supported
 * (asserted); each descriptor is marked FS|LS with FCS insertion.
 * Returns 0.
 */
static int
mgb_isc_txd_encap(void *xsc , if_pkt_info_t ipi)
{
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	struct mgb_ring_desc *txd;
	bus_dma_segment_t *segs;
	qidx_t pidx, nsegs;
	int i;

	KASSERT(ipi->ipi_qsidx == 0,
	    ("tried to refill TX Channel %d.\n", ipi->ipi_qsidx));
	sc = xsc;
	rdata = &sc->tx_ring_data;

	pidx = ipi->ipi_pidx;
	segs = ipi->ipi_segs;
	nsegs = ipi->ipi_nsegs;

	/* For each seg, create a descriptor */
	for (i = 0; i < nsegs; ++i) {
		KASSERT(nsegs == 1, ("Multisegment packet !!!!!\n"));
		txd = &rdata->ring[pidx];
		txd->ctl = htole32(
		    (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK ) |
		    /*
		     * XXX: This will be wrong in the multipacket case
		     * I suspect FS should be for the first packet and
		     * LS should be for the last packet
		     */
		    MGB_TX_DESC_CTL_FS | MGB_TX_DESC_CTL_LS |
		    MGB_DESC_CTL_FCS);
		txd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32(
		    segs[i].ds_addr));
		txd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32(
		    segs[i].ds_addr));
		/* Frame length lives in the upper half of the sts word. */
		txd->sts = htole32(
		    (segs[i].ds_len << 16) & MGB_DESC_FRAME_LEN_MASK);
		pidx = MGB_NEXT_RING_IDX(pidx);
	}
	ipi->ipi_new_pidx = pidx;
	return (0);
}
1004
1005 static void
1006 mgb_isc_txd_flush(void *xsc, uint16_t txqid, qidx_t pidx)
1007 {
1008 struct mgb_softc *sc;
1009 struct mgb_ring_data *rdata;
1010
1011 KASSERT(txqid == 0, ("tried to flush TX Channel %d.\n", txqid));
1012 sc = xsc;
1013 rdata = &sc->tx_ring_data;
1014
1015 if (rdata->last_tail != pidx) {
1016 rdata->last_tail = pidx;
1017 CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(txqid), rdata->last_tail);
1018 }
1019 }
1020
/*
 * ift_txd_credits_update: report TX descriptors completed by the device,
 * determined by comparing the device's head writeback word against our
 * cached last_head.  With clear=false just report (1) if any completed;
 * with clear=true zero each completed descriptor, advance last_head, and
 * return the count.
 */
static int
mgb_isc_txd_credits_update(void *xsc, uint16_t txqid, bool clear)
{
	struct mgb_softc *sc;
	struct mgb_ring_desc *txd;
	struct mgb_ring_data *rdata;
	int processed = 0;

	/*
	 * > If clear is true, we need to report the number of TX command ring
	 * > descriptors that have been processed by the device.  If clear is
	 * > false, we just need to report whether or not at least one TX
	 * > command ring descriptor has been processed by the device.
	 * - vmx driver
	 */
	KASSERT(txqid == 0, ("tried to credits_update TX Channel %d.\n",
	    txqid));
	sc = xsc;
	rdata = &sc->tx_ring_data;

	while (*(rdata->head_wb) != rdata->last_head) {
		if (!clear)
			return (1);

		txd = &rdata->ring[rdata->last_head];
		memset(txd, 0, sizeof(struct mgb_ring_desc));
		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
		processed++;
	}

	return (processed);
}
1053
1054 static int
1055 mgb_isc_rxd_available(void *xsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
1056 {
1057 struct mgb_softc *sc;
1058 struct mgb_ring_data *rdata;
1059 int avail = 0;
1060
1061 sc = xsc;
1062 KASSERT(rxqid == 0, ("tried to check availability in RX Channel %d.\n",
1063 rxqid));
1064
1065 rdata = &sc->rx_ring_data;
1066 for (; idx != *(rdata->head_wb); idx = MGB_NEXT_RING_IDX(idx)) {
1067 avail++;
1068 /* XXX: Could verify desc is device owned here */
1069 if (avail == budget)
1070 break;
1071 }
1072 return (avail);
1073 }
1074
1075 static int
1076 mgb_isc_rxd_pkt_get(void *xsc, if_rxd_info_t ri)
1077 {
1078 struct mgb_softc *sc;
1079 struct mgb_ring_data *rdata;
1080 struct mgb_ring_desc rxd;
1081 int total_len;
1082
1083 KASSERT(ri->iri_qsidx == 0,
1084 ("tried to check availability in RX Channel %d\n", ri->iri_qsidx));
1085 sc = xsc;
1086 total_len = 0;
1087 rdata = &sc->rx_ring_data;
1088
1089 while (*(rdata->head_wb) != rdata->last_head) {
1090 /* copy ring desc and do swapping */
1091 rxd = rdata->ring[rdata->last_head];
1092 rxd.ctl = le32toh(rxd.ctl);
1093 rxd.addr.low = le32toh(rxd.ctl);
1094 rxd.addr.high = le32toh(rxd.ctl);
1095 rxd.sts = le32toh(rxd.ctl);
1096
1097 if ((rxd.ctl & MGB_DESC_CTL_OWN) != 0) {
1098 device_printf(sc->dev,
1099 "Tried to read descriptor ... "
1100 "found that it's owned by the driver\n");
1101 return (EINVAL);
1102 }
1103 if ((rxd.ctl & MGB_RX_DESC_CTL_FS) == 0) {
1104 device_printf(sc->dev,
1105 "Tried to read descriptor ... "
1106 "found that FS is not set.\n");
1107 device_printf(sc->dev, "Tried to read descriptor ... that it FS is not set.\n");
1108 return (EINVAL);
1109 }
1110 /* XXX: Multi-packet support */
1111 if ((rxd.ctl & MGB_RX_DESC_CTL_LS) == 0) {
1112 device_printf(sc->dev,
1113 "Tried to read descriptor ... "
1114 "found that LS is not set. (Multi-buffer packets not yet supported)\n");
1115 return (EINVAL);
1116 }
1117 ri->iri_frags[0].irf_flid = 0;
1118 ri->iri_frags[0].irf_idx = rdata->last_head;
1119 ri->iri_frags[0].irf_len = MGB_DESC_GET_FRAME_LEN(&rxd);
1120 total_len += ri->iri_frags[0].irf_len;
1121
1122 rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1123 break;
1124 }
1125 ri->iri_nfrags = 1;
1126 ri->iri_len = total_len;
1127
1128 return (0);
1129 }
1130
1131 static void
1132 mgb_isc_rxd_refill(void *xsc, if_rxd_update_t iru)
1133 {
1134 struct mgb_softc *sc;
1135 struct mgb_ring_data *rdata;
1136 struct mgb_ring_desc *rxd;
1137 uint64_t *paddrs;
1138 qidx_t *idxs;
1139 qidx_t idx;
1140 int count, len;
1141
1142 count = iru->iru_count;
1143 len = iru->iru_buf_size;
1144 idxs = iru->iru_idxs;
1145 paddrs = iru->iru_paddrs;
1146 KASSERT(iru->iru_qsidx == 0,
1147 ("tried to refill RX Channel %d.\n", iru->iru_qsidx));
1148
1149 sc = xsc;
1150 rdata = &sc->rx_ring_data;
1151
1152 while (count > 0) {
1153 idx = idxs[--count];
1154 rxd = &rdata->ring[idx];
1155
1156 rxd->sts = 0;
1157 rxd->addr.low =
1158 htole32(CSR_TRANSLATE_ADDR_LOW32(paddrs[count]));
1159 rxd->addr.high =
1160 htole32(CSR_TRANSLATE_ADDR_HIGH32(paddrs[count]));
1161 rxd->ctl = htole32(MGB_DESC_CTL_OWN |
1162 (len & MGB_DESC_CTL_BUFLEN_MASK));
1163 }
1164 return;
1165 }
1166
1167 static void
1168 mgb_isc_rxd_flush(void *xsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
1169 {
1170 struct mgb_softc *sc;
1171
1172 sc = xsc;
1173
1174 KASSERT(rxqid == 0, ("tried to flush RX Channel %d.\n", rxqid));
1175 /*
1176 * According to the programming guide, last_tail must be set to
1177 * the last valid RX descriptor, rather than to the one past that.
1178 * Note that this is not true for the TX ring!
1179 */
1180 sc->rx_ring_data.last_tail = MGB_PREV_RING_IDX(pidx);
1181 CSR_WRITE_REG(sc, MGB_DMA_RX_TAIL(rxqid), sc->rx_ring_data.last_tail);
1182 return;
1183 }
1184
1185 static int
1186 mgb_test_bar(struct mgb_softc *sc)
1187 {
1188 uint32_t id_rev, dev_id;
1189
1190 id_rev = CSR_READ_REG(sc, 0);
1191 dev_id = id_rev >> 16;
1192 if (dev_id == MGB_LAN7430_DEVICE_ID ||
1193 dev_id == MGB_LAN7431_DEVICE_ID) {
1194 return (0);
1195 } else {
1196 device_printf(sc->dev, "ID check failed.\n");
1197 return (ENXIO);
1198 }
1199 }
1200
1201 static int
1202 mgb_alloc_regs(struct mgb_softc *sc)
1203 {
1204 int rid;
1205
1206 rid = PCIR_BAR(MGB_BAR);
1207 pci_enable_busmaster(sc->dev);
1208 sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1209 &rid, RF_ACTIVE);
1210 if (sc->regs == NULL)
1211 return (ENXIO);
1212
1213 return (0);
1214 }
1215
1216 static int
1217 mgb_release_regs(struct mgb_softc *sc)
1218 {
1219 int error = 0;
1220
1221 if (sc->regs != NULL)
1222 error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
1223 rman_get_rid(sc->regs), sc->regs);
1224 sc->regs = NULL;
1225 pci_disable_busmaster(sc->dev);
1226 return (error);
1227 }
1228
1229 static int
1230 mgb_dma_init(struct mgb_softc *sc)
1231 {
1232 if_softc_ctx_t scctx;
1233 int ch, error = 0;
1234
1235 scctx = iflib_get_softc_ctx(sc->ctx);
1236
1237 for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
1238 if ((error = mgb_dma_rx_ring_init(sc, ch)))
1239 goto fail;
1240
1241 for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
1242 if ((error = mgb_dma_tx_ring_init(sc, ch)))
1243 goto fail;
1244
1245 fail:
1246 return (error);
1247 }
1248
1249 static int
1250 mgb_dma_rx_ring_init(struct mgb_softc *sc, int channel)
1251 {
1252 struct mgb_ring_data *rdata;
1253 int ring_config, error = 0;
1254
1255 rdata = &sc->rx_ring_data;
1256 mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_RESET);
1257 KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_RX_START, channel),
1258 ("Trying to init channels when not in init state\n"));
1259
1260 /* write ring address */
1261 if (rdata->ring_bus_addr == 0) {
1262 device_printf(sc->dev, "Invalid ring bus addr.\n");
1263 goto fail;
1264 }
1265
1266 CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_H(channel),
1267 CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1268 CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_L(channel),
1269 CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1270
1271 /* write head pointer writeback address */
1272 if (rdata->head_wb_bus_addr == 0) {
1273 device_printf(sc->dev, "Invalid head wb bus addr.\n");
1274 goto fail;
1275 }
1276 CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_H(channel),
1277 CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1278 CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_L(channel),
1279 CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1280
1281 /* Enable head pointer writeback */
1282 CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG0(channel), MGB_DMA_HEAD_WB_ENBL);
1283
1284 ring_config = CSR_READ_REG(sc, MGB_DMA_RX_CONFIG1(channel));
1285 /* ring size */
1286 ring_config &= ~MGB_DMA_RING_LEN_MASK;
1287 ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1288 /* packet padding (PAD_2 is better for IP header alignment ...) */
1289 ring_config &= ~MGB_DMA_RING_PAD_MASK;
1290 ring_config |= (MGB_DMA_RING_PAD_0 & MGB_DMA_RING_PAD_MASK);
1291
1292 CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG1(channel), ring_config);
1293
1294 rdata->last_head = CSR_READ_REG(sc, MGB_DMA_RX_HEAD(channel));
1295
1296 mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_RESET);
1297 if (error != 0) {
1298 device_printf(sc->dev, "Failed to reset RX FCT.\n");
1299 goto fail;
1300 }
1301 mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_ENABLE);
1302 if (error != 0) {
1303 device_printf(sc->dev, "Failed to enable RX FCT.\n");
1304 goto fail;
1305 }
1306 mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_START);
1307 if (error != 0)
1308 device_printf(sc->dev, "Failed to start RX DMAC.\n");
1309 fail:
1310 return (error);
1311 }
1312
1313 static int
1314 mgb_dma_tx_ring_init(struct mgb_softc *sc, int channel)
1315 {
1316 struct mgb_ring_data *rdata;
1317 int ring_config, error = 0;
1318
1319 rdata = &sc->tx_ring_data;
1320 if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel, FCT_RESET))) {
1321 device_printf(sc->dev, "Failed to reset TX FCT.\n");
1322 goto fail;
1323 }
1324 if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel,
1325 FCT_ENABLE))) {
1326 device_printf(sc->dev, "Failed to enable TX FCT.\n");
1327 goto fail;
1328 }
1329 if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1330 DMAC_RESET))) {
1331 device_printf(sc->dev, "Failed to reset TX DMAC.\n");
1332 goto fail;
1333 }
1334 KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_TX_START, channel),
1335 ("Trying to init channels in not init state\n"));
1336
1337 /* write ring address */
1338 if (rdata->ring_bus_addr == 0) {
1339 device_printf(sc->dev, "Invalid ring bus addr.\n");
1340 goto fail;
1341 }
1342 CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_H(channel),
1343 CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
1344 CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_L(channel),
1345 CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));
1346
1347 /* write ring size */
1348 ring_config = CSR_READ_REG(sc, MGB_DMA_TX_CONFIG1(channel));
1349 ring_config &= ~MGB_DMA_RING_LEN_MASK;
1350 ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
1351 CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG1(channel), ring_config);
1352
1353 /* Enable interrupt on completion and head pointer writeback */
1354 ring_config = (MGB_DMA_HEAD_WB_LS_ENBL | MGB_DMA_HEAD_WB_ENBL);
1355 CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG0(channel), ring_config);
1356
1357 /* write head pointer writeback address */
1358 if (rdata->head_wb_bus_addr == 0) {
1359 device_printf(sc->dev, "Invalid head wb bus addr.\n");
1360 goto fail;
1361 }
1362 CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_H(channel),
1363 CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1364 CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_L(channel),
1365 CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1366
1367 rdata->last_head = CSR_READ_REG(sc, MGB_DMA_TX_HEAD(channel));
1368 KASSERT(rdata->last_head == 0, ("MGB_DMA_TX_HEAD was not reset.\n"));
1369 rdata->last_tail = 0;
1370 CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(channel), rdata->last_tail);
1371
1372 if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1373 DMAC_START)))
1374 device_printf(sc->dev, "Failed to start TX DMAC.\n");
1375 fail:
1376 return (error);
1377 }
1378
1379 static int
1380 mgb_dmac_control(struct mgb_softc *sc, int start, int channel,
1381 enum mgb_dmac_cmd cmd)
1382 {
1383 int error = 0;
1384
1385 switch (cmd) {
1386 case DMAC_RESET:
1387 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1388 MGB_DMAC_CMD_RESET(start, channel));
1389 error = mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0,
1390 MGB_DMAC_CMD_RESET(start, channel));
1391 break;
1392
1393 case DMAC_START:
1394 /*
1395 * NOTE: this simplifies the logic, since it will never
1396 * try to start in STOP_PENDING, but it also increases work.
1397 */
1398 error = mgb_dmac_control(sc, start, channel, DMAC_STOP);
1399 if (error != 0)
1400 return (error);
1401 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1402 MGB_DMAC_CMD_START(start, channel));
1403 break;
1404
1405 case DMAC_STOP:
1406 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1407 MGB_DMAC_CMD_STOP(start, channel));
1408 error = mgb_wait_for_bits(sc, MGB_DMAC_CMD,
1409 MGB_DMAC_CMD_STOP(start, channel),
1410 MGB_DMAC_CMD_START(start, channel));
1411 break;
1412 }
1413 return (error);
1414 }
1415
1416 static int
1417 mgb_fct_control(struct mgb_softc *sc, int reg, int channel,
1418 enum mgb_fct_cmd cmd)
1419 {
1420
1421 switch (cmd) {
1422 case FCT_RESET:
1423 CSR_WRITE_REG(sc, reg, MGB_FCT_RESET(channel));
1424 return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_RESET(channel)));
1425 case FCT_ENABLE:
1426 CSR_WRITE_REG(sc, reg, MGB_FCT_ENBL(channel));
1427 return (0);
1428 case FCT_DISABLE:
1429 CSR_WRITE_REG(sc, reg, MGB_FCT_DSBL(channel));
1430 return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_ENBL(channel)));
1431 }
1432 }
1433
1434 static int
1435 mgb_hw_teardown(struct mgb_softc *sc)
1436 {
1437 int err = 0;
1438
1439 /* Stop MAC */
1440 CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1441 CSR_WRITE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1442 if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0)))
1443 return (err);
1444 if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0)))
1445 return (err);
1446 return (err);
1447 }
1448
/*
 * Bring the hardware up: chip reset, MAC init, PHY reset, DMAC reset.
 * Returns the first failure, or 0 on success.
 */
static int
mgb_hw_init(struct mgb_softc *sc)
{
	int error;

	error = mgb_hw_reset(sc);
	if (error != 0)
		return (error);

	mgb_mac_init(sc);

	error = mgb_phy_reset(sc);
	if (error != 0)
		return (error);

	return (mgb_dmac_reset(sc));
}
1471
/*
 * Trigger a "lite" chip reset and poll until the hardware clears the
 * reset bit.  Returns MGB_STS_OK or MGB_STS_TIMEOUT (via
 * mgb_wait_for_bits()).
 */
static int
mgb_hw_reset(struct mgb_softc *sc)
{

	CSR_UPDATE_REG(sc, MGB_HW_CFG, MGB_LITE_RESET);
	return (mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET));
}
1479
/*
 * Configure and enable the MAC.  Always returns MGB_STS_OK.
 */
static int
mgb_mac_init(struct mgb_softc *sc)
{

	/**
	 * enable automatic duplex detection and
	 * automatic speed detection
	 */
	CSR_UPDATE_REG(sc, MGB_MAC_CR, MGB_MAC_ADD_ENBL | MGB_MAC_ASD_ENBL);
	/* Enable the transmit and receive paths. */
	CSR_UPDATE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
	CSR_UPDATE_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);

	return (MGB_STS_OK);
}
1494
1495 static int
1496 mgb_phy_reset(struct mgb_softc *sc)
1497 {
1498
1499 CSR_UPDATE_BYTE(sc, MGB_PMT_CTL, MGB_PHY_RESET);
1500 if (mgb_wait_for_bits(sc, MGB_PMT_CTL, 0, MGB_PHY_RESET) ==
1501 MGB_STS_TIMEOUT)
1502 return (MGB_STS_TIMEOUT);
1503 return (mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0));
1504 }
1505
/*
 * Reset the whole DMA controller and poll until the hardware clears
 * the reset bit.  Returns MGB_STS_OK or MGB_STS_TIMEOUT (via
 * mgb_wait_for_bits()).
 */
static int
mgb_dmac_reset(struct mgb_softc *sc)
{

	CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_RESET);
	return (mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_RESET));
}
1513
1514 static int
1515 mgb_wait_for_bits(struct mgb_softc *sc, int reg, int set_bits, int clear_bits)
1516 {
1517 int i, val;
1518
1519 i = 0;
1520 do {
1521 /*
1522 * XXX: Datasheets states delay should be > 5 microseconds
1523 * for device reset.
1524 */
1525 DELAY(100);
1526 val = CSR_READ_REG(sc, reg);
1527 if ((val & set_bits) == set_bits && (val & clear_bits) == 0)
1528 return (MGB_STS_OK);
1529 } while (i++ < MGB_TIMEOUT);
1530
1531 return (MGB_STS_TIMEOUT);
1532 }
1533
1534 static void
1535 mgb_get_ethaddr(struct mgb_softc *sc, struct ether_addr *dest)
1536 {
1537
1538 CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_L, &dest->octet[0], 4);
1539 CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_H, &dest->octet[4], 2);
1540 }
1541
1542 static int
1543 mgb_miibus_readreg(device_t dev, int phy, int reg)
1544 {
1545 struct mgb_softc *sc;
1546 int mii_access;
1547
1548 sc = iflib_get_softc(device_get_softc(dev));
1549
1550 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1551 MGB_STS_TIMEOUT)
1552 return (EIO);
1553 mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1554 mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1555 mii_access |= MGB_MII_BUSY | MGB_MII_READ;
1556 CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1557 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1558 MGB_STS_TIMEOUT)
1559 return (EIO);
1560 return (CSR_READ_2_BYTES(sc, MGB_MII_DATA));
1561 }
1562
1563 static int
1564 mgb_miibus_writereg(device_t dev, int phy, int reg, int data)
1565 {
1566 struct mgb_softc *sc;
1567 int mii_access;
1568
1569 sc = iflib_get_softc(device_get_softc(dev));
1570
1571 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1572 MGB_STS_TIMEOUT)
1573 return (EIO);
1574 mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1575 mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1576 mii_access |= MGB_MII_BUSY | MGB_MII_WRITE;
1577 CSR_WRITE_REG(sc, MGB_MII_DATA, data);
1578 CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1579 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1580 MGB_STS_TIMEOUT)
1581 return (EIO);
1582 return (0);
1583 }
1584
1585 /* XXX: May need to lock these up */
1586 static void
1587 mgb_miibus_statchg(device_t dev)
1588 {
1589 struct mgb_softc *sc;
1590 struct mii_data *miid;
1591
1592 sc = iflib_get_softc(device_get_softc(dev));
1593 miid = device_get_softc(sc->miibus);
1594 /* Update baudrate in iflib */
1595 sc->baudrate = ifmedia_baudrate(miid->mii_media_active);
1596 iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1597 }
1598
1599 static void
1600 mgb_miibus_linkchg(device_t dev)
1601 {
1602 struct mgb_softc *sc;
1603 struct mii_data *miid;
1604 int link_state;
1605
1606 sc = iflib_get_softc(device_get_softc(dev));
1607 miid = device_get_softc(sc->miibus);
1608 /* XXX: copied from miibus_linkchg **/
1609 if (miid->mii_media_status & IFM_AVALID) {
1610 if (miid->mii_media_status & IFM_ACTIVE)
1611 link_state = LINK_STATE_UP;
1612 else
1613 link_state = LINK_STATE_DOWN;
1614 } else
1615 link_state = LINK_STATE_UNKNOWN;
1616 sc->link_state = link_state;
1617 iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1618 }
/* Cache object: 4597723bd83745d4fcb53e911a732c1e */