/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Advanced Micro Devices, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Contact Information:
 * Rajesh Kumar <rajesh1.kumar@amd.com>
 * Shreyank Amartya <Shreyank.Amartya@amd.com>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "xgbe.h"
#include "xgbe-common.h"

#include "miibus_if.h"
#include "ifdi_if.h"
#include "opt_inet.h"
#include "opt_inet6.h"

MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data");

extern struct if_txrx axgbe_txrx;
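/* Split header (SPH) support tunable, read from kenv "dev.ax.sph_enable" in axgbe_register() */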
static int axgbe_sph_enable;

/* Function prototypes */
static void *axgbe_register(device_t);
static int axgbe_if_attach_pre(if_ctx_t);
static int axgbe_if_attach_post(if_ctx_t);
static int axgbe_if_detach(if_ctx_t);
static void axgbe_if_stop(if_ctx_t);
static void axgbe_if_init(if_ctx_t);

/* Queue related routines */
static int axgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int axgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int axgbe_alloc_channels(if_ctx_t);
static void axgbe_free_channels(struct axgbe_if_softc *);
static void axgbe_if_queues_free(if_ctx_t);
static int axgbe_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
static int axgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);

/* Interrupt related routines */
static void axgbe_if_disable_intr(if_ctx_t);
static void axgbe_if_enable_intr(if_ctx_t);
static int axgbe_if_msix_intr_assign(if_ctx_t, int);
static void xgbe_free_intr(struct xgbe_prv_data *, struct resource *, void *, int);

/* Init and Iflib routines */
static void axgbe_pci_init(struct xgbe_prv_data *);
static void axgbe_pci_stop(if_ctx_t);
static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *, struct xgbe_channel *);
static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *);
static int axgbe_if_mtu_set(if_ctx_t, uint32_t);
static void axgbe_if_update_admin_status(if_ctx_t);
static void axgbe_if_media_status(if_ctx_t, struct ifmediareq *);
static int axgbe_if_media_change(if_ctx_t);
static int axgbe_if_promisc_set(if_ctx_t, int);
static uint64_t axgbe_if_get_counter(if_ctx_t, ift_counter);
static void axgbe_if_vlan_register(if_ctx_t, uint16_t);
static void axgbe_if_vlan_unregister(if_ctx_t, uint16_t);
#if __FreeBSD_version >= 1300000
static bool axgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
#endif
static void axgbe_set_counts(if_ctx_t);
static void axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *);

/* MII interface registered functions */
static int axgbe_miibus_readreg(device_t, int, int);
static int axgbe_miibus_writereg(device_t, int, int, int);
static void axgbe_miibus_statchg(device_t);

/* ISR routines */
static int axgbe_dev_isr(void *);
static void axgbe_ecc_isr(void *);
static void axgbe_i2c_isr(void *);
static void axgbe_an_isr(void *);
static int axgbe_msix_que(void *);

/* Timer routines */
static void xgbe_service(void *, int);
static void xgbe_service_timer(void *);
static void xgbe_init_timers(struct xgbe_prv_data *);
static void xgbe_stop_timers(struct xgbe_prv_data *);

/* Dump routines */
static void xgbe_dump_prop_registers(struct xgbe_prv_data *);

/*
 * Allocate resources only for the MAC (BAR0) and PCS (BAR1) registers, and
 * just hand the MSI-X table BAR (BAR5) to iflib, which does the allocation
 * for the MSI-X table itself.
 */
static struct resource_spec axgbe_pci_mac_spec[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },	/* MAC regs */
	{ SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE },	/* PCS regs */
	{ -1, 0 }
};

static pci_vendor_info_t axgbe_vendor_info_array[] =
{
	PVID(0x1022, 0x1458, "AMD 10 Gigabit Ethernet Driver"),
	PVID(0x1022, 0x1459, "AMD 10 Gigabit Ethernet Driver"),
	PVID_END
};

static struct xgbe_version_data xgbe_v2a = {
	.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
	.xpcs_access = XGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.irq_reissue_support = 1,
	.tx_desc_prefetch = 5,
	.rx_desc_prefetch = 5,
	.an_cdr_workaround = 1,
};

static struct xgbe_version_data xgbe_v2b = {
	.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
	.xpcs_access = XGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.irq_reissue_support = 1,
	.tx_desc_prefetch = 5,
	.rx_desc_prefetch = 5,
	.an_cdr_workaround = 1,
};

/* Device Interface */
static device_method_t ax_methods[] = {
	DEVMETHOD(device_register, axgbe_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),

	/* MII interface */
	DEVMETHOD(miibus_readreg, axgbe_miibus_readreg),
	DEVMETHOD(miibus_writereg, axgbe_miibus_writereg),
	DEVMETHOD(miibus_statchg, axgbe_miibus_statchg),

	DEVMETHOD_END
};

static driver_t ax_driver = {
	"ax", ax_methods, sizeof(struct axgbe_if_softc),
};

DRIVER_MODULE(axp, pci, ax_driver, 0, 0);
DRIVER_MODULE(miibus, ax, miibus_driver, 0, 0);
IFLIB_PNP_INFO(pci, ax_driver, axgbe_vendor_info_array);

MODULE_DEPEND(ax, pci, 1, 1, 1);
MODULE_DEPEND(ax, ether, 1, 1, 1);
MODULE_DEPEND(ax, iflib, 1, 1, 1);
MODULE_DEPEND(ax, miibus, 1, 1, 1);

/* Iflib Interface */
static device_method_t axgbe_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, axgbe_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, axgbe_if_attach_post),
	DEVMETHOD(ifdi_detach, axgbe_if_detach),
	DEVMETHOD(ifdi_init, axgbe_if_init),
	DEVMETHOD(ifdi_stop, axgbe_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, axgbe_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, axgbe_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, axgbe_if_disable_intr),
	DEVMETHOD(ifdi_tx_queue_intr_enable, axgbe_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, axgbe_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, axgbe_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, axgbe_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, axgbe_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, axgbe_if_update_admin_status),
	DEVMETHOD(ifdi_mtu_set, axgbe_if_mtu_set),
	DEVMETHOD(ifdi_media_status, axgbe_if_media_status),
	DEVMETHOD(ifdi_media_change, axgbe_if_media_change),
	DEVMETHOD(ifdi_promisc_set, axgbe_if_promisc_set),
	DEVMETHOD(ifdi_get_counter, axgbe_if_get_counter),
	DEVMETHOD(ifdi_vlan_register, axgbe_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, axgbe_if_vlan_unregister),
#if __FreeBSD_version >= 1300000
	DEVMETHOD(ifdi_needs_restart, axgbe_if_needs_restart),
#endif
	DEVMETHOD_END
};

static driver_t axgbe_if_driver = {
	"axgbe_if", axgbe_if_methods, sizeof(struct axgbe_if_softc)
};

/* Iflib Shared Context */
static struct if_shared_ctx axgbe_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &axgbe_if_driver,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM9BYTES,
	.isc_rx_maxsegsize = MJUM9BYTES,
	.isc_rx_nsegments = 1,
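	/* Four admin (non-queue) interrupt vectors: dev, ECC, I2C and AN */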
	.isc_admin_intrcnt = 4,

	.isc_vendor_info = axgbe_vendor_info_array,
	.isc_driver_version = XGBE_DRV_VERSION,

	.isc_ntxd_min = {XGBE_TX_DESC_CNT_MIN},
	.isc_ntxd_default = {XGBE_TX_DESC_CNT_DEFAULT},
	.isc_ntxd_max = {XGBE_TX_DESC_CNT_MAX},

	.isc_ntxqs = 1,
	.isc_flags = IFLIB_TSO_INIT_IP | IFLIB_NEED_SCRATCH |
	    IFLIB_NEED_ZERO_CSUM | IFLIB_NEED_ETHER_PAD,
};

static void *
axgbe_register(device_t dev)
{
	int axgbe_nfl;
	int axgbe_nrxqs;
	int error, i;
	char *value = NULL;

	value = kern_getenv("dev.ax.sph_enable");
	if (value) {
		axgbe_sph_enable = strtol(value, NULL, 10);
		freeenv(value);
	} else {
		/*
		 * No tunable found, generate one with default values
		 * Note: only a reboot will reveal the new kenv
		 */
		error = kern_setenv("dev.ax.sph_enable", "1");
		if (error) {
			printf("Error setting tunable, using default driver values\n");
		}
		axgbe_sph_enable = 1;
	}

	if (!axgbe_sph_enable) {
		axgbe_nfl = 1;
		axgbe_nrxqs = 1;
	} else {
		axgbe_nfl = 2;
		axgbe_nrxqs = 2;
	}

	axgbe_sctx_init.isc_nfl = axgbe_nfl;
	axgbe_sctx_init.isc_nrxqs = axgbe_nrxqs;

	for (i = 0; i < axgbe_nrxqs; i++) {
		axgbe_sctx_init.isc_nrxd_min[i] = XGBE_RX_DESC_CNT_MIN;
		axgbe_sctx_init.isc_nrxd_default[i] = XGBE_RX_DESC_CNT_DEFAULT;
		axgbe_sctx_init.isc_nrxd_max[i] = XGBE_RX_DESC_CNT_MAX;
	}

	return (&axgbe_sctx_init);
}

/* MII Interface Functions */
static int
axgbe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev));
	struct xgbe_prv_data *pdata = &sc->pdata;
	int val;

	axgbe_printf(3, "%s: phy %d reg %d\n", __func__, phy, reg);

	val = xgbe_phy_mii_read(pdata, phy, reg);

	axgbe_printf(2, "%s: val 0x%x\n", __func__, val);
	return (val & 0xFFFF);
}

static int
axgbe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev));
	struct xgbe_prv_data *pdata = &sc->pdata;

	axgbe_printf(3, "%s: phy %d reg %d val 0x%x\n", __func__, phy, reg, val);

	xgbe_phy_mii_write(pdata, phy, reg, val);

	return (0);
}

static void
axgbe_miibus_statchg(device_t dev)
{
	struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev));
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct mii_data *mii = device_get_softc(pdata->axgbe_miibus);
	if_t ifp = pdata->netdev;
	int bmsr;

	axgbe_printf(2, "%s: Link %d/%d\n", __func__, pdata->phy.link,
	    pdata->phy_link);

	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			pdata->phy.link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_2500_SX:
			pdata->phy.link = 1;
			break;
		default:
			pdata->phy.link = 0;
			break;
		}
	} else
		pdata->phy_link = 0;

	bmsr = axgbe_miibus_readreg(pdata->dev, pdata->mdio_addr, MII_BMSR);
	if (bmsr & BMSR_ANEG) {
		axgbe_printf(2, "%s: Autoneg Done\n", __func__);

		/* Raise AN Interrupt */
		XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
		    XGBE_AN_CL73_INT_MASK);
	}
}

static int
axgbe_if_attach_pre(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc;
	struct xgbe_prv_data *pdata;
	struct resource *mac_res[2];
	if_softc_ctx_t scctx;
	if_shared_ctx_t sctx;
	device_t dev;
	unsigned int ma_lo, ma_hi;
	unsigned int reg;
	int ret;

	sc = iflib_get_softc(ctx);
	sc->pdata.dev = dev = iflib_get_dev(ctx);
	sc->sctx = sctx = iflib_get_sctx(ctx);
	sc->scctx = scctx = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	sc->ctx = ctx;
	sc->link_status = LINK_STATE_DOWN;
	pdata = &sc->pdata;
	pdata->netdev = iflib_get_ifp(ctx);

	spin_lock_init(&pdata->xpcs_lock);

	/* Initialize locks */
	mtx_init(&pdata->rss_mutex, "xgbe rss mutex lock", NULL, MTX_DEF);
	mtx_init(&pdata->mdio_mutex, "xgbe MDIO mutex lock", NULL, MTX_SPIN);

	/* Allocate VLAN bitmap */
	pdata->active_vlans = bit_alloc(VLAN_NVID, M_AXGBE, M_WAITOK | M_ZERO);
	pdata->num_active_vlans = 0;

	/* Get the version data */
	DBGPR("%s: Device ID: 0x%x\n", __func__, pci_get_device(dev));
	if (pci_get_device(dev) == 0x1458)
		sc->pdata.vdata = &xgbe_v2a;
	else if (pci_get_device(dev) == 0x1459)
		sc->pdata.vdata = &xgbe_v2b;

	/* PCI setup */
	if (bus_alloc_resources(dev, axgbe_pci_mac_spec, mac_res)) {
		axgbe_error("Unable to allocate bus resources\n");
		ret = ENXIO;
		goto free_vlans;
	}

	sc->pdata.xgmac_res = mac_res[0];
	sc->pdata.xpcs_res = mac_res[1];

	/* Set the PCS indirect addressing definition registers */
	pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
	pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
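	/* The SIZE field encodes a power of two: the window spans 1 << (SIZE + 7) bytes */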
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	DBGPR("xpcs window def  : %#010x\n", pdata->xpcs_window_def_reg);
	DBGPR("xpcs window sel  : %#010x\n", pdata->xpcs_window_sel_reg);
	DBGPR("xpcs window      : %#010x\n", pdata->xpcs_window);
	DBGPR("xpcs window size : %#010x\n", pdata->xpcs_window_size);
	DBGPR("xpcs window mask : %#010x\n", pdata->xpcs_window_mask);

	/* Enable all interrupts in the hardware */
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr[0] = ma_lo & 0xff;
	pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
	pdata->mac_addr[2] = (ma_lo >> 16) & 0xff;
	pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
	pdata->mac_addr[4] = ma_hi & 0xff;
	pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
	if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID)) {
		axgbe_error("Invalid mac address\n");
		ret = EINVAL;
		goto release_bus_resource;
	}
	iflib_set_mac(ctx, pdata->mac_addr);

	/* Clock settings */
	pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->arcr = XGBE_DMA_PCI_ARCR;
	pdata->awcr = XGBE_DMA_PCI_AWCR;
	pdata->awarcr = XGBE_DMA_PCI_AWARCR;

	/* Read the port property registers */
	pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0);
	pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1);
	pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2);
	pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3);
	pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4);
	DBGPR("port property 0 = %#010x\n", pdata->pp0);
	DBGPR("port property 1 = %#010x\n", pdata->pp1);
	DBGPR("port property 2 = %#010x\n", pdata->pp2);
	DBGPR("port property 3 = %#010x\n", pdata->pp3);
	DBGPR("port property 4 = %#010x\n", pdata->pp4);

	/* Set the maximum channels and queues */
	pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
	    MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
	    MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
	    MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
	    MAX_RX_QUEUES);
	DBGPR("max tx/rx channel count = %u/%u\n",
	    pdata->tx_max_channel_count, pdata->rx_max_channel_count);
	DBGPR("max tx/rx hw queue count = %u/%u\n",
	    pdata->tx_max_q_count, pdata->rx_max_q_count);

	axgbe_set_counts(ctx);

	/* Set the maximum fifo amounts */
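	/* The TX/RX_FIFO_SIZE properties are in 16KB units; clamp to the per-version limit */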
	pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
	    TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
	    pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
	    RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
	    pdata->vdata->rx_max_fifo_size);
	DBGPR("max tx/rx max fifo size = %u/%u\n",
	    pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);

	/* Initialize IFLIB if_softc_ctx_t */
	axgbe_init_iflib_softc_ctx(sc);

	/* Alloc channels */
	if (axgbe_alloc_channels(ctx)) {
		axgbe_error("Unable to allocate channel memory\n");
		ret = ENOMEM;
		goto release_bus_resource;
	}

	TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata);

	/* Create the workqueue */
	pdata->dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
	    taskqueue_thread_enqueue, &pdata->dev_workqueue);
	if (pdata->dev_workqueue == NULL) {
		axgbe_error("Unable to allocate workqueue\n");
		ret = ENOMEM;
		goto free_channels;
	}
	ret = taskqueue_start_threads(&pdata->dev_workqueue, 1, PI_NET,
	    "axgbe dev taskq");
	if (ret) {
		axgbe_error("Unable to start taskqueue\n");
		ret = ENOMEM;
		goto free_task_queue;
	}

	/* Init timers */
	xgbe_init_timers(pdata);

	return (0);

free_task_queue:
	taskqueue_free(pdata->dev_workqueue);

free_channels:
	axgbe_free_channels(sc);

release_bus_resource:
	bus_release_resources(dev, axgbe_pci_mac_spec, mac_res);

free_vlans:
	free(pdata->active_vlans, M_AXGBE);

	return (ret);
} /* axgbe_if_attach_pre */

static void
xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
	xgbe_init_function_ptrs_dev(&pdata->hw_if);
	xgbe_init_function_ptrs_phy(&pdata->phy_if);
	xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	xgbe_init_function_ptrs_desc(&pdata->desc_if);

	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void
axgbe_set_counts(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	cpuset_t lcpus;
	int cpu_count, err;
	size_t len;

	/* Set all function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/*
	 * Calculate the number of Tx and Rx rings to be created
	 * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *  the number of Tx queues to the number of Tx channels
	 *  enabled
	 * -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *  number of Rx queues or maximum allowed
	 */

	/* Get cpu count from sysctl */
	len = sizeof(cpu_count);
	err = kernel_sysctlbyname(curthread, "hw.ncpu", &cpu_count, &len, NULL,
	    0, NULL, 0);
	if (err) {
		axgbe_error("Unable to fetch number of cpus\n");
		cpu_count = 1;
	}

	if (bus_get_cpus(pdata->dev, INTR_CPUS, sizeof(lcpus), &lcpus) != 0) {
		axgbe_error("Unable to fetch CPU list\n");
		/* TODO - handle CPU_COPY(&all_cpus, &lcpus); */
	}

	DBGPR("ncpu %d intrcpu %d\n", cpu_count, CPU_COUNT(&lcpus));

	pdata->tx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.tx_ch_cnt);
	pdata->tx_ring_count = min(pdata->tx_ring_count,
	    pdata->tx_max_channel_count);
	pdata->tx_ring_count = min(pdata->tx_ring_count, pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.rx_ch_cnt);
	pdata->rx_ring_count = min(pdata->rx_ring_count,
	    pdata->rx_max_channel_count);

	pdata->rx_q_count = min(pdata->hw_feat.rx_q_cnt, pdata->rx_max_q_count);

	DBGPR("TX/RX max channel count = %u/%u\n",
	    pdata->tx_max_channel_count, pdata->rx_max_channel_count);
	DBGPR("TX/RX max queue count = %u/%u\n",
	    pdata->tx_max_q_count, pdata->rx_max_q_count);
	DBGPR("TX/RX DMA ring count = %u/%u\n",
	    pdata->tx_ring_count, pdata->rx_ring_count);
	DBGPR("TX/RX hardware queue count = %u/%u\n",
	    pdata->tx_q_count, pdata->rx_q_count);
} /* axgbe_set_counts */

static void
axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *sc)
{
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_softc_ctx_t scctx = sc->scctx;
	if_shared_ctx_t sctx = sc->sctx;
	int i;

	scctx->isc_nrxqsets = pdata->rx_q_count;
	scctx->isc_ntxqsets = pdata->tx_q_count;
	scctx->isc_msix_bar = pci_msix_table_bar(pdata->dev);
	scctx->isc_tx_nsegments = 32;

	for (i = 0; i < sctx->isc_ntxqs; i++) {
		scctx->isc_txqsizes[i] =
		    roundup2(scctx->isc_ntxd[i] * sizeof(struct xgbe_ring_desc),
		    128);
		scctx->isc_txd_size[i] = sizeof(struct xgbe_ring_desc);
	}

	for (i = 0; i < sctx->isc_nrxqs; i++) {
		scctx->isc_rxqsizes[i] =
		    roundup2(scctx->isc_nrxd[i] * sizeof(struct xgbe_ring_desc),
		    128);
		scctx->isc_rxd_size[i] = sizeof(struct xgbe_ring_desc);
	}

	scctx->isc_tx_tso_segments_max = 32;
	scctx->isc_tx_tso_size_max = XGBE_TSO_MAX_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	/*
	 * Set capabilities
	 * 1) IFLIB automatically adds IFCAP_HWSTATS, so there is no need to
	 *    set it explicitly
	 * 2) isc_tx_csum_flags is mandatory if IFCAP_TXCSUM (included in
	 *    IFCAP_HWCSUM) is set
	 */
	scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP |
	    CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6 |
	    CSUM_TSO);
	scctx->isc_capenable = (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
	    IFCAP_JUMBO_MTU |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER |
	    IFCAP_VLAN_HWCSUM |
	    IFCAP_TSO | IFCAP_VLAN_HWTSO);
	scctx->isc_capabilities = scctx->isc_capenable;

	/*
	 * Set rss_table_size alone when adding RSS support. rss_table_mask
	 * will be set by IFLIB based on rss_table_size
	 */
	scctx->isc_rss_table_size = XGBE_RSS_MAX_TABLE_SIZE;

	scctx->isc_ntxqsets_max = XGBE_MAX_QUEUES;
	scctx->isc_nrxqsets_max = XGBE_MAX_QUEUES;

	scctx->isc_txrx = &axgbe_txrx;
}

static int
axgbe_alloc_channels(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel;
	int i, j, count;

	DBGPR("%s: txqs %d rxqs %d\n", __func__, pdata->tx_ring_count,
	    pdata->rx_ring_count);

	/* iflib sets up the queue sets based on isc_ntxqsets/nrxqsets */
	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	/* Allocate channel memory */
	for (i = 0; i < count; i++) {
		channel = (struct xgbe_channel *)malloc(sizeof(struct xgbe_channel),
		    M_AXGBE, M_NOWAIT | M_ZERO);

		if (channel == NULL) {
			for (j = 0; j < i; j++) {
				free(pdata->channel[j], M_AXGBE);
				pdata->channel[j] = NULL;
			}
			return (ENOMEM);
		}

		pdata->channel[i] = channel;
	}

	pdata->total_channel_count = count;
	DBGPR("Channel count set to: %u\n", pdata->total_channel_count);

	for (i = 0; i < count; i++) {
		channel = pdata->channel[i];
		snprintf(channel->name, sizeof(channel->name), "channel-%d", i);

		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_tag = rman_get_bustag(pdata->xgmac_res);
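		/* Map this channel's DMA register block out of the MAC BAR */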
		bus_space_subregion(channel->dma_tag,
		    rman_get_bushandle(pdata->xgmac_res),
		    DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC,
		    &channel->dma_handle);
		channel->tx_ring = NULL;
		channel->rx_ring = NULL;
	}

	return (0);
} /* axgbe_alloc_channels */

static void
axgbe_free_channels(struct axgbe_if_softc *sc)
{
	struct xgbe_prv_data *pdata = &sc->pdata;
	int i;

	for (i = 0; i < pdata->total_channel_count; i++) {
		free(pdata->channel[i], M_AXGBE);
		pdata->channel[i] = NULL;
	}

	pdata->total_channel_count = 0;
	pdata->channel_count = 0;
}

static void
xgbe_service(void *ctx, int pending)
{
	struct xgbe_prv_data *pdata = ctx;
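	/* pdata is the first member of the softc, so this cast recovers sc */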
	struct axgbe_if_softc *sc = (struct axgbe_if_softc *)pdata;
	bool prev_state = false;

	/* Get previous link status */
	prev_state = pdata->phy.link;

	pdata->phy_if.phy_status(pdata);

	if (prev_state != pdata->phy.link) {
		pdata->phy_link = pdata->phy.link;
		axgbe_if_update_admin_status(sc->ctx);
	}

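	/* Re-arm the one-second service timer */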
	callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata);
}

static void
xgbe_service_timer(void *data)
{
	struct xgbe_prv_data *pdata = data;

	taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);
}

static void
xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	callout_init(&pdata->service_timer, 1);
}

static void
xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata);
}

static void
xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	callout_drain(&pdata->service_timer);
	callout_stop(&pdata->service_timer);
}

static void
xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
{
	axgbe_printf(1, "\n************* PHY Reg dump *********************\n");

	axgbe_printf(1, "PCS Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
	    XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
	axgbe_printf(1, "PCS Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
	    XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
	axgbe_printf(1, "Phy Id (PHYS ID 1 %#06x) = %#06x\n", MDIO_DEVID1,
	    XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
	axgbe_printf(1, "Phy Id (PHYS ID 2 %#06x) = %#06x\n", MDIO_DEVID2,
	    XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
	axgbe_printf(1, "Devices in Package (%#06x) = %#06x\n", MDIO_DEVS1,
	    XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
	axgbe_printf(1, "Devices in Package (%#06x) = %#06x\n", MDIO_DEVS2,
	    XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
	axgbe_printf(1, "Auto-Neg Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
	    XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
	axgbe_printf(1, "Auto-Neg Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
	    XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
	axgbe_printf(1, "Auto-Neg Ad Reg 1 (%#06x) = %#06x\n",
	    MDIO_AN_ADVERTISE,
	    XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
	axgbe_printf(1, "Auto-Neg Ad Reg 2 (%#06x) = %#06x\n",
	    MDIO_AN_ADVERTISE + 1,
	    XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
	axgbe_printf(1, "Auto-Neg Ad Reg 3 (%#06x) = %#06x\n",
	    MDIO_AN_ADVERTISE + 2,
	    XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
	axgbe_printf(1, "Auto-Neg Completion Reg (%#06x) = %#06x\n",
	    MDIO_AN_COMP_STAT,
	    XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));

	axgbe_printf(1, "\n************************************************\n");
}

static void
xgbe_dump_prop_registers(struct xgbe_prv_data *pdata)
{
	int i;

	axgbe_printf(1, "\n************* PROP Reg dump ********************\n");

	for (i = 0; i < 38; i++) {
		axgbe_printf(1, "PROP Offset 0x%08x = %08x\n",
		    (XP_PROP_0 + (i * 4)), XP_IOREAD(pdata,
		    (XP_PROP_0 + (i * 4))));
	}
}

static void
xgbe_dump_dma_registers(struct xgbe_prv_data *pdata, int ch)
{
	struct xgbe_channel *channel;
	int i;

	axgbe_printf(1, "\n************* DMA Reg dump *********************\n");

	axgbe_printf(1, "DMA MR Reg (%08x) = %08x\n", DMA_MR,
	    XGMAC_IOREAD(pdata, DMA_MR));
	axgbe_printf(1, "DMA SBMR Reg (%08x) = %08x\n", DMA_SBMR,
	    XGMAC_IOREAD(pdata, DMA_SBMR));
	axgbe_printf(1, "DMA ISR Reg (%08x) = %08x\n", DMA_ISR,
	    XGMAC_IOREAD(pdata, DMA_ISR));
	axgbe_printf(1, "DMA AXIARCR Reg (%08x) = %08x\n", DMA_AXIARCR,
	    XGMAC_IOREAD(pdata, DMA_AXIARCR));
	axgbe_printf(1, "DMA AXIAWCR Reg (%08x) = %08x\n", DMA_AXIAWCR,
	    XGMAC_IOREAD(pdata, DMA_AXIAWCR));
	axgbe_printf(1, "DMA AXIAWARCR Reg (%08x) = %08x\n", DMA_AXIAWARCR,
	    XGMAC_IOREAD(pdata, DMA_AXIAWARCR));
	axgbe_printf(1, "DMA DSR0 Reg (%08x) = %08x\n", DMA_DSR0,
	    XGMAC_IOREAD(pdata, DMA_DSR0));
	axgbe_printf(1, "DMA DSR1 Reg (%08x) = %08x\n", DMA_DSR1,
	    XGMAC_IOREAD(pdata, DMA_DSR1));
	axgbe_printf(1, "DMA DSR2 Reg (%08x) = %08x\n", DMA_DSR2,
	    XGMAC_IOREAD(pdata, DMA_DSR2));
	axgbe_printf(1, "DMA DSR3 Reg (%08x) = %08x\n", DMA_DSR3,
	    XGMAC_IOREAD(pdata, DMA_DSR3));
	axgbe_printf(1, "DMA DSR4 Reg (%08x) = %08x\n", DMA_DSR4,
	    XGMAC_IOREAD(pdata, DMA_DSR4));
	axgbe_printf(1, "DMA TXEDMACR Reg (%08x) = %08x\n", DMA_TXEDMACR,
	    XGMAC_IOREAD(pdata, DMA_TXEDMACR));
	axgbe_printf(1, "DMA RXEDMACR Reg (%08x) = %08x\n", DMA_RXEDMACR,
	    XGMAC_IOREAD(pdata, DMA_RXEDMACR));

	for (i = 0; i < 8; i++) {
		if (ch >= 0) {
			if (i != ch)
				continue;
		}

		channel = pdata->channel[i];

		axgbe_printf(1, "\n************* DMA CH %d dump ****************\n", i);

		axgbe_printf(1, "DMA_CH_CR Reg (%08x) = %08x\n",
		    DMA_CH_CR, XGMAC_DMA_IOREAD(channel, DMA_CH_CR));
		axgbe_printf(1, "DMA_CH_TCR Reg (%08x) = %08x\n",
		    DMA_CH_TCR, XGMAC_DMA_IOREAD(channel, DMA_CH_TCR));
		axgbe_printf(1, "DMA_CH_RCR Reg (%08x) = %08x\n",
		    DMA_CH_RCR, XGMAC_DMA_IOREAD(channel, DMA_CH_RCR));
		axgbe_printf(1, "DMA_CH_TDLR_HI Reg (%08x) = %08x\n",
		    DMA_CH_TDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_HI));
		axgbe_printf(1, "DMA_CH_TDLR_LO Reg (%08x) = %08x\n",
		    DMA_CH_TDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_LO));
		axgbe_printf(1, "DMA_CH_RDLR_HI Reg (%08x) = %08x\n",
		    DMA_CH_RDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_HI));
		axgbe_printf(1, "DMA_CH_RDLR_LO Reg (%08x) = %08x\n",
		    DMA_CH_RDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_LO));
		axgbe_printf(1, "DMA_CH_TDTR_LO Reg (%08x) = %08x\n",
		    DMA_CH_TDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO));
		axgbe_printf(1, "DMA_CH_RDTR_LO Reg (%08x) = %08x\n",
		    DMA_CH_RDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTR_LO));
		axgbe_printf(1, "DMA_CH_TDRLR Reg (%08x) = %08x\n",
		    DMA_CH_TDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_TDRLR));
		axgbe_printf(1, "DMA_CH_RDRLR Reg (%08x) = %08x\n",
		    DMA_CH_RDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_RDRLR));
		axgbe_printf(1, "DMA_CH_IER Reg (%08x) = %08x\n",
		    DMA_CH_IER, XGMAC_DMA_IOREAD(channel, DMA_CH_IER));
		axgbe_printf(1, "DMA_CH_RIWT Reg (%08x) = %08x\n",
		    DMA_CH_RIWT, XGMAC_DMA_IOREAD(channel, DMA_CH_RIWT));
		axgbe_printf(1, "DMA_CH_CATDR_LO Reg (%08x) = %08x\n",
		    DMA_CH_CATDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATDR_LO));
		axgbe_printf(1, "DMA_CH_CARDR_LO Reg (%08x) = %08x\n",
		    DMA_CH_CARDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARDR_LO));
		axgbe_printf(1, "DMA_CH_CATBR_HI Reg (%08x) = %08x\n",
		    DMA_CH_CATBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_HI));
		axgbe_printf(1, "DMA_CH_CATBR_LO Reg (%08x) = %08x\n",
		    DMA_CH_CATBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_LO));
		axgbe_printf(1, "DMA_CH_CARBR_HI Reg (%08x) = %08x\n",
		    DMA_CH_CARBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_HI));
		axgbe_printf(1, "DMA_CH_CARBR_LO Reg (%08x) = %08x\n",
		    DMA_CH_CARBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_LO));
		axgbe_printf(1, "DMA_CH_SR Reg (%08x) = %08x\n",
		    DMA_CH_SR, XGMAC_DMA_IOREAD(channel, DMA_CH_SR));
		axgbe_printf(1, "DMA_CH_DSR Reg (%08x) = %08x\n",
		    DMA_CH_DSR, XGMAC_DMA_IOREAD(channel, DMA_CH_DSR));
		axgbe_printf(1, "DMA_CH_DCFL Reg (%08x) = %08x\n",
		    DMA_CH_DCFL, XGMAC_DMA_IOREAD(channel, DMA_CH_DCFL));
		axgbe_printf(1, "DMA_CH_MFC Reg (%08x) = %08x\n",
		    DMA_CH_MFC, XGMAC_DMA_IOREAD(channel, DMA_CH_MFC));
		axgbe_printf(1, "DMA_CH_TDTRO Reg (%08x) = %08x\n",
		    DMA_CH_TDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTRO));
		axgbe_printf(1, "DMA_CH_RDTRO Reg (%08x) = %08x\n",
		    DMA_CH_RDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTRO));
		axgbe_printf(1, "DMA_CH_TDWRO Reg (%08x) = %08x\n",
		    DMA_CH_TDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDWRO));
		axgbe_printf(1, "DMA_CH_RDWRO Reg (%08x) = %08x\n",
		    DMA_CH_RDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDWRO));
	}
}

static void
xgbe_dump_mtl_registers(struct xgbe_prv_data *pdata)
{
	int i;

	axgbe_printf(1, "\n************* MTL Reg dump *********************\n");

	axgbe_printf(1, "MTL OMR Reg (%08x) = %08x\n", MTL_OMR,
	    XGMAC_IOREAD(pdata, MTL_OMR));
	axgbe_printf(1, "MTL FDCR Reg (%08x) = %08x\n", MTL_FDCR,
	    XGMAC_IOREAD(pdata, MTL_FDCR));
	axgbe_printf(1, "MTL FDSR Reg (%08x) = %08x\n", MTL_FDSR,
	    XGMAC_IOREAD(pdata, MTL_FDSR));
	axgbe_printf(1, "MTL FDDR Reg (%08x) = %08x\n", MTL_FDDR,
	    XGMAC_IOREAD(pdata, MTL_FDDR));
	axgbe_printf(1, "MTL ISR Reg (%08x) = %08x\n", MTL_ISR,
	    XGMAC_IOREAD(pdata, MTL_ISR));
	axgbe_printf(1, "MTL RQDCM0R Reg (%08x) = %08x\n", MTL_RQDCM0R,
	    XGMAC_IOREAD(pdata, MTL_RQDCM0R));
	axgbe_printf(1, "MTL RQDCM1R Reg (%08x) = %08x\n", MTL_RQDCM1R,
	    XGMAC_IOREAD(pdata, MTL_RQDCM1R));
	axgbe_printf(1, "MTL RQDCM2R Reg (%08x) = %08x\n", MTL_RQDCM2R,
	    XGMAC_IOREAD(pdata, MTL_RQDCM2R));
	axgbe_printf(1, "MTL TCPM0R Reg (%08x) = %08x\n", MTL_TCPM0R,
	    XGMAC_IOREAD(pdata, MTL_TCPM0R));
	axgbe_printf(1, "MTL TCPM1R Reg (%08x) = %08x\n", MTL_TCPM1R,
	    XGMAC_IOREAD(pdata, MTL_TCPM1R));

	for (i = 0; i < 8; i++) {
		axgbe_printf(1, "\n************* MTL CH %d dump ****************\n", i);

		axgbe_printf(1, "MTL_Q_TQOMR Reg (%08x) = %08x\n",
		    MTL_Q_TQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR));
		axgbe_printf(1, "MTL_Q_TQUR Reg (%08x) = %08x\n",
		    MTL_Q_TQUR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQUR));
		axgbe_printf(1, "MTL_Q_TQDR Reg (%08x) = %08x\n",
		    MTL_Q_TQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQDR));
		axgbe_printf(1, "MTL_Q_TC0ETSCR Reg (%08x) = %08x\n",
		    MTL_Q_TC0ETSCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSCR));
		axgbe_printf(1, "MTL_Q_TC0ETSSR Reg (%08x) = %08x\n",
		    MTL_Q_TC0ETSSR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSSR));
		axgbe_printf(1, "MTL_Q_TC0QWR Reg (%08x) = %08x\n",
		    MTL_Q_TC0QWR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0QWR));

		axgbe_printf(1, "MTL_Q_RQOMR Reg (%08x) = %08x\n",
		    MTL_Q_RQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR));
		axgbe_printf(1, "MTL_Q_RQMPOCR Reg (%08x) = %08x\n",
		    MTL_Q_RQMPOCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQMPOCR));
		axgbe_printf(1, "MTL_Q_RQDR Reg (%08x) = %08x\n",
		    MTL_Q_RQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQDR));
		axgbe_printf(1, "MTL_Q_RQCR Reg (%08x) = %08x\n",
		    MTL_Q_RQCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQCR));
		axgbe_printf(1, "MTL_Q_RQFCR Reg (%08x) = %08x\n",
		    MTL_Q_RQFCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR));
		axgbe_printf(1, "MTL_Q_IER Reg (%08x) = %08x\n",
		    MTL_Q_IER, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_IER));
		axgbe_printf(1, "MTL_Q_ISR Reg (%08x) = %08x\n",
		    MTL_Q_ISR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR));
	}
}

static void
xgbe_dump_mac_registers(struct xgbe_prv_data *pdata)
{
	axgbe_printf(1, "\n************* MAC Reg dump **********************\n");

	axgbe_printf(1, "MAC TCR Reg (%08x) = %08x\n", MAC_TCR,
	    XGMAC_IOREAD(pdata, MAC_TCR));
	axgbe_printf(1, "MAC RCR Reg (%08x) = %08x\n", MAC_RCR,
	    XGMAC_IOREAD(pdata, MAC_RCR));
	axgbe_printf(1, "MAC PFR Reg (%08x) = %08x\n", MAC_PFR,
	    XGMAC_IOREAD(pdata, MAC_PFR));
	axgbe_printf(1, "MAC WTR Reg (%08x) = %08x\n", MAC_WTR,
	    XGMAC_IOREAD(pdata, MAC_WTR));
	axgbe_printf(1, "MAC HTR0 Reg (%08x) = %08x\n", MAC_HTR0,
	    XGMAC_IOREAD(pdata, MAC_HTR0));
	axgbe_printf(1, "MAC HTR1 Reg (%08x) = %08x\n", MAC_HTR1,
	    XGMAC_IOREAD(pdata, MAC_HTR1));
	axgbe_printf(1, "MAC HTR2 Reg (%08x) = %08x\n", MAC_HTR2,
	    XGMAC_IOREAD(pdata, MAC_HTR2));
	axgbe_printf(1, "MAC HTR3 Reg (%08x) = %08x\n", MAC_HTR3,
	    XGMAC_IOREAD(pdata, MAC_HTR3));
	axgbe_printf(1, "MAC HTR4 Reg (%08x) = %08x\n", MAC_HTR4,
	    XGMAC_IOREAD(pdata, MAC_HTR4));
	axgbe_printf(1, "MAC HTR5 Reg (%08x) = %08x\n", MAC_HTR5,
	    XGMAC_IOREAD(pdata, MAC_HTR5));
	axgbe_printf(1, "MAC HTR6 Reg (%08x) = %08x\n", MAC_HTR6,
	    XGMAC_IOREAD(pdata, MAC_HTR6));
	axgbe_printf(1, "MAC HTR7 Reg (%08x) = %08x\n", MAC_HTR7,
	    XGMAC_IOREAD(pdata, MAC_HTR7));
	axgbe_printf(1, "MAC VLANTR Reg (%08x) = %08x\n", MAC_VLANTR,
	    XGMAC_IOREAD(pdata, MAC_VLANTR));
	axgbe_printf(1, "MAC VLANHTR Reg (%08x) = %08x\n", MAC_VLANHTR,
	    XGMAC_IOREAD(pdata, MAC_VLANHTR));
	axgbe_printf(1, "MAC VLANIR Reg (%08x) = %08x\n", MAC_VLANIR,
	    XGMAC_IOREAD(pdata, MAC_VLANIR));
	axgbe_printf(1, "MAC IVLANIR Reg (%08x) = %08x\n", MAC_IVLANIR,
	    XGMAC_IOREAD(pdata, MAC_IVLANIR));
	axgbe_printf(1, "MAC RETMR Reg (%08x) = %08x\n", MAC_RETMR,
	    XGMAC_IOREAD(pdata, MAC_RETMR));
	axgbe_printf(1, "MAC Q0TFCR Reg (%08x) = %08x\n", MAC_Q0TFCR,
	    XGMAC_IOREAD(pdata, MAC_Q0TFCR));
	axgbe_printf(1, "MAC Q1TFCR Reg (%08x) = %08x\n", MAC_Q1TFCR,
	    XGMAC_IOREAD(pdata, MAC_Q1TFCR));
	axgbe_printf(1, "MAC Q2TFCR Reg (%08x) = %08x\n", MAC_Q2TFCR,
	    XGMAC_IOREAD(pdata, MAC_Q2TFCR));
	axgbe_printf(1, "MAC Q3TFCR Reg (%08x) = %08x\n", MAC_Q3TFCR,
	    XGMAC_IOREAD(pdata, MAC_Q3TFCR));
	axgbe_printf(1, "MAC Q4TFCR Reg (%08x) = %08x\n", MAC_Q4TFCR,
	    XGMAC_IOREAD(pdata, MAC_Q4TFCR));
	axgbe_printf(1, "MAC Q5TFCR Reg (%08x) = %08x\n", MAC_Q5TFCR,
	    XGMAC_IOREAD(pdata, MAC_Q5TFCR));
	axgbe_printf(1, "MAC Q6TFCR Reg (%08x) = %08x\n", MAC_Q6TFCR,
	    XGMAC_IOREAD(pdata, MAC_Q6TFCR));
	axgbe_printf(1, "MAC Q7TFCR Reg (%08x) = %08x\n", MAC_Q7TFCR,
	    XGMAC_IOREAD(pdata, MAC_Q7TFCR));
	axgbe_printf(1, "MAC RFCR Reg (%08x) = %08x\n", MAC_RFCR,
	    XGMAC_IOREAD(pdata, MAC_RFCR));
	axgbe_printf(1, "MAC RQC0R Reg (%08x) = %08x\n", MAC_RQC0R,
	    XGMAC_IOREAD(pdata, MAC_RQC0R));
	axgbe_printf(1, "MAC RQC1R Reg (%08x) = %08x\n", MAC_RQC1R,
	    XGMAC_IOREAD(pdata, MAC_RQC1R));
	axgbe_printf(1, "MAC RQC2R Reg (%08x) = %08x\n", MAC_RQC2R,
	    XGMAC_IOREAD(pdata, MAC_RQC2R));
	axgbe_printf(1, "MAC RQC3R Reg (%08x) = %08x\n", MAC_RQC3R,
	    XGMAC_IOREAD(pdata, MAC_RQC3R));
	axgbe_printf(1, "MAC ISR Reg (%08x) = %08x\n", MAC_ISR,
	    XGMAC_IOREAD(pdata, MAC_ISR));
	axgbe_printf(1, "MAC IER Reg (%08x) = %08x\n", MAC_IER,
	    XGMAC_IOREAD(pdata, MAC_IER));
	axgbe_printf(1, "MAC RTSR Reg (%08x) = %08x\n", MAC_RTSR,
	    XGMAC_IOREAD(pdata, MAC_RTSR));
	axgbe_printf(1, "MAC PMTCSR Reg (%08x) = %08x\n", MAC_PMTCSR,
	    XGMAC_IOREAD(pdata, MAC_PMTCSR));
	axgbe_printf(1, "MAC RWKPFR Reg (%08x) = %08x\n", MAC_RWKPFR,
	    XGMAC_IOREAD(pdata, MAC_RWKPFR));
	axgbe_printf(1, "MAC LPICSR Reg (%08x) = %08x\n", MAC_LPICSR,
	    XGMAC_IOREAD(pdata, MAC_LPICSR));
	axgbe_printf(1, "MAC LPITCR Reg (%08x) = %08x\n", MAC_LPITCR,
	    XGMAC_IOREAD(pdata, MAC_LPITCR));
	axgbe_printf(1, "MAC TIR Reg (%08x) = %08x\n", MAC_TIR,
	    XGMAC_IOREAD(pdata, MAC_TIR));
	axgbe_printf(1, "MAC VR Reg (%08x) = %08x\n", MAC_VR,
	    XGMAC_IOREAD(pdata, MAC_VR));
	axgbe_printf(1, "MAC DR Reg (%08x) = %08x\n", MAC_DR,
	    XGMAC_IOREAD(pdata, MAC_DR));
	axgbe_printf(1, "MAC HWF0R Reg (%08x) = %08x\n", MAC_HWF0R,
	    XGMAC_IOREAD(pdata, MAC_HWF0R));
	axgbe_printf(1, "MAC HWF1R Reg (%08x) = %08x\n", MAC_HWF1R,
	    XGMAC_IOREAD(pdata, MAC_HWF1R));
	axgbe_printf(1, "MAC HWF2R Reg (%08x) = %08x\n", MAC_HWF2R,
	    XGMAC_IOREAD(pdata, MAC_HWF2R));
	axgbe_printf(1, "MAC MDIOSCAR Reg (%08x) = %08x\n", MAC_MDIOSCAR,
	    XGMAC_IOREAD(pdata, MAC_MDIOSCAR));
	axgbe_printf(1, "MAC MDIOSCCDR Reg (%08x) = %08x\n", MAC_MDIOSCCDR,
	    XGMAC_IOREAD(pdata, MAC_MDIOSCCDR));
	axgbe_printf(1, "MAC MDIOISR Reg (%08x) = %08x\n", MAC_MDIOISR,
	    XGMAC_IOREAD(pdata, MAC_MDIOISR));
	axgbe_printf(1, "MAC MDIOIER Reg (%08x) = %08x\n", MAC_MDIOIER,
	    XGMAC_IOREAD(pdata, MAC_MDIOIER));
	axgbe_printf(1, "MAC MDIOCL22R Reg (%08x) = %08x\n", MAC_MDIOCL22R,
	    XGMAC_IOREAD(pdata, MAC_MDIOCL22R));
	axgbe_printf(1, "MAC GPIOCR Reg (%08x) = %08x\n", MAC_GPIOCR,
	    XGMAC_IOREAD(pdata, MAC_GPIOCR));
	axgbe_printf(1, "MAC GPIOSR Reg (%08x) = %08x\n", MAC_GPIOSR,
	    XGMAC_IOREAD(pdata, MAC_GPIOSR));
	axgbe_printf(1, "MAC MACA0HR Reg (%08x) = %08x\n", MAC_MACA0HR,
	    XGMAC_IOREAD(pdata, MAC_MACA0HR));
1157 axgbe_printf(1, "MAC MACA0LR Reg (%08x) = %08x\n", MAC_TCR,
1158 XGMAC_IOREAD(pdata, MAC_MACA0LR));
1159 axgbe_printf(1, "MAC MACA1HR Reg (%08x) = %08x\n", MAC_MACA1HR,
1160 XGMAC_IOREAD(pdata, MAC_MACA1HR));
1161 axgbe_printf(1, "MAC MACA1LR Reg (%08x) = %08x\n", MAC_MACA1LR,
1162 XGMAC_IOREAD(pdata, MAC_MACA1LR));
1163 axgbe_printf(1, "MAC RSSCR Reg (%08x) = %08x\n", MAC_RSSCR,
1164 XGMAC_IOREAD(pdata, MAC_RSSCR));
1165 axgbe_printf(1, "MAC RSSDR Reg (%08x) = %08x\n", MAC_RSSDR,
1166 XGMAC_IOREAD(pdata, MAC_RSSDR));
1167 axgbe_printf(1, "MAC RSSAR Reg (%08x) = %08x\n", MAC_RSSAR,
1168 XGMAC_IOREAD(pdata, MAC_RSSAR));
1169 axgbe_printf(1, "MAC TSCR Reg (%08x) = %08x\n", MAC_TSCR,
1170 XGMAC_IOREAD(pdata, MAC_TSCR));
1171 axgbe_printf(1, "MAC SSIR Reg (%08x) = %08x\n", MAC_SSIR,
1172 XGMAC_IOREAD(pdata, MAC_SSIR));
1173 axgbe_printf(1, "MAC STSR Reg (%08x) = %08x\n", MAC_STSR,
1174 XGMAC_IOREAD(pdata, MAC_STSR));
1175 axgbe_printf(1, "MAC STNR Reg (%08x) = %08x\n", MAC_STNR,
1176 XGMAC_IOREAD(pdata, MAC_STNR));
1177 axgbe_printf(1, "MAC STSUR Reg (%08x) = %08x\n", MAC_STSUR,
1178 XGMAC_IOREAD(pdata, MAC_STSUR));
1179 axgbe_printf(1, "MAC STNUR Reg (%08x) = %08x\n", MAC_STNUR,
1180 XGMAC_IOREAD(pdata, MAC_STNUR));
1181 axgbe_printf(1, "MAC TSAR Reg (%08x) = %08x\n", MAC_TSAR,
1182 XGMAC_IOREAD(pdata, MAC_TSAR));
1183 axgbe_printf(1, "MAC TSSR Reg (%08x) = %08x\n", MAC_TSSR,
1184 XGMAC_IOREAD(pdata, MAC_TSSR));
1185 axgbe_printf(1, "MAC TXSNR Reg (%08x) = %08x\n", MAC_TXSNR,
1186 XGMAC_IOREAD(pdata, MAC_TXSNR));
1187 axgbe_printf(1, "MAC TXSSR Reg (%08x) = %08x\n", MAC_TXSSR,
1188 XGMAC_IOREAD(pdata, MAC_TXSSR));
1189 }
1190
1191 static void
1192 xgbe_dump_rmon_counters(struct xgbe_prv_data *pdata)
1193 {
1194 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
1195
1196 axgbe_printf(1, "\n************* RMON counters dump ***************\n");
1197
1198 pdata->hw_if.read_mmc_stats(pdata);
1199
1200 axgbe_printf(1, "rmon txoctetcount_gb (%08x) = %08lx\n",
1201 MMC_TXOCTETCOUNT_GB_LO, stats->txoctetcount_gb);
1202 axgbe_printf(1, "rmon txframecount_gb (%08x) = %08lx\n",
1203 MMC_TXFRAMECOUNT_GB_LO, stats->txframecount_gb);
1204 axgbe_printf(1, "rmon txbroadcastframes_g (%08x) = %08lx\n",
1205 MMC_TXBROADCASTFRAMES_G_LO, stats->txbroadcastframes_g);
1206 axgbe_printf(1, "rmon txmulticastframes_g (%08x) = %08lx\n",
1207 MMC_TXMULTICASTFRAMES_G_LO, stats->txmulticastframes_g);
1208 axgbe_printf(1, "rmon tx64octets_gb (%08x) = %08lx\n",
1209 MMC_TX64OCTETS_GB_LO, stats->tx64octets_gb);
1210 axgbe_printf(1, "rmon tx65to127octets_gb (%08x) = %08lx\n",
1211 MMC_TX65TO127OCTETS_GB_LO, stats->tx65to127octets_gb);
1212 axgbe_printf(1, "rmon tx128to255octets_gb (%08x) = %08lx\n",
1213 MMC_TX128TO255OCTETS_GB_LO, stats->tx128to255octets_gb);
1214 axgbe_printf(1, "rmon tx256to511octets_gb (%08x) = %08lx\n",
1215 MMC_TX256TO511OCTETS_GB_LO, stats->tx256to511octets_gb);
1216 axgbe_printf(1, "rmon tx512to1023octets_gb (%08x) = %08lx\n",
1217 MMC_TX512TO1023OCTETS_GB_LO, stats->tx512to1023octets_gb);
1218 axgbe_printf(1, "rmon tx1024tomaxoctets_gb (%08x) = %08lx\n",
1219 MMC_TX1024TOMAXOCTETS_GB_LO, stats->tx1024tomaxoctets_gb);
1220 axgbe_printf(1, "rmon txunicastframes_gb (%08x) = %08lx\n",
1221 MMC_TXUNICASTFRAMES_GB_LO, stats->txunicastframes_gb);
1222 axgbe_printf(1, "rmon txmulticastframes_gb (%08x) = %08lx\n",
1223 MMC_TXMULTICASTFRAMES_GB_LO, stats->txmulticastframes_gb);
1224 axgbe_printf(1, "rmon txbroadcastframes_gb (%08x) = %08lx\n",
1225 MMC_TXBROADCASTFRAMES_GB_LO, stats->txbroadcastframes_gb);
1226 axgbe_printf(1, "rmon txunderflowerror (%08x) = %08lx\n",
1227 MMC_TXUNDERFLOWERROR_LO, stats->txunderflowerror);
1228 axgbe_printf(1, "rmon txoctetcount_g (%08x) = %08lx\n",
1229 MMC_TXOCTETCOUNT_G_LO, stats->txoctetcount_g);
1230 axgbe_printf(1, "rmon txframecount_g (%08x) = %08lx\n",
1231 MMC_TXFRAMECOUNT_G_LO, stats->txframecount_g);
1232 axgbe_printf(1, "rmon txpauseframes (%08x) = %08lx\n",
1233 MMC_TXPAUSEFRAMES_LO, stats->txpauseframes);
1234 axgbe_printf(1, "rmon txvlanframes_g (%08x) = %08lx\n",
1235 MMC_TXVLANFRAMES_G_LO, stats->txvlanframes_g);
1236 axgbe_printf(1, "rmon rxframecount_gb (%08x) = %08lx\n",
1237 MMC_RXFRAMECOUNT_GB_LO, stats->rxframecount_gb);
1238 axgbe_printf(1, "rmon rxoctetcount_gb (%08x) = %08lx\n",
1239 MMC_RXOCTETCOUNT_GB_LO, stats->rxoctetcount_gb);
1240 axgbe_printf(1, "rmon rxoctetcount_g (%08x) = %08lx\n",
1241 MMC_RXOCTETCOUNT_G_LO, stats->rxoctetcount_g);
1242 axgbe_printf(1, "rmon rxbroadcastframes_g (%08x) = %08lx\n",
1243 MMC_RXBROADCASTFRAMES_G_LO, stats->rxbroadcastframes_g);
1244 axgbe_printf(1, "rmon rxmulticastframes_g (%08x) = %08lx\n",
1245 MMC_RXMULTICASTFRAMES_G_LO, stats->rxmulticastframes_g);
1246 axgbe_printf(1, "rmon rxcrcerror (%08x) = %08lx\n",
1247 MMC_RXCRCERROR_LO, stats->rxcrcerror);
1248 axgbe_printf(1, "rmon rxrunterror (%08x) = %08lx\n",
1249 MMC_RXRUNTERROR, stats->rxrunterror);
1250 axgbe_printf(1, "rmon rxjabbererror (%08x) = %08lx\n",
1251 MMC_RXJABBERERROR, stats->rxjabbererror);
1252 axgbe_printf(1, "rmon rxundersize_g (%08x) = %08lx\n",
1253 MMC_RXUNDERSIZE_G, stats->rxundersize_g);
1254 axgbe_printf(1, "rmon rxoversize_g (%08x) = %08lx\n",
1255 MMC_RXOVERSIZE_G, stats->rxoversize_g);
1256 axgbe_printf(1, "rmon rx64octets_gb (%08x) = %08lx\n",
1257 MMC_RX64OCTETS_GB_LO, stats->rx64octets_gb);
1258 axgbe_printf(1, "rmon rx65to127octets_gb (%08x) = %08lx\n",
1259 MMC_RX65TO127OCTETS_GB_LO, stats->rx65to127octets_gb);
1260 axgbe_printf(1, "rmon rx128to255octets_gb (%08x) = %08lx\n",
1261 MMC_RX128TO255OCTETS_GB_LO, stats->rx128to255octets_gb);
1262 axgbe_printf(1, "rmon rx256to511octets_gb (%08x) = %08lx\n",
1263 MMC_RX256TO511OCTETS_GB_LO, stats->rx256to511octets_gb);
1264 axgbe_printf(1, "rmon rx512to1023octets_gb (%08x) = %08lx\n",
1265 MMC_RX512TO1023OCTETS_GB_LO, stats->rx512to1023octets_gb);
1266 axgbe_printf(1, "rmon rx1024tomaxoctets_gb (%08x) = %08lx\n",
1267 MMC_RX1024TOMAXOCTETS_GB_LO, stats->rx1024tomaxoctets_gb);
1268 axgbe_printf(1, "rmon rxunicastframes_g (%08x) = %08lx\n",
1269 MMC_RXUNICASTFRAMES_G_LO, stats->rxunicastframes_g);
1270 axgbe_printf(1, "rmon rxlengtherror (%08x) = %08lx\n",
1271 MMC_RXLENGTHERROR_LO, stats->rxlengtherror);
1272 axgbe_printf(1, "rmon rxoutofrangetype (%08x) = %08lx\n",
1273 MMC_RXOUTOFRANGETYPE_LO, stats->rxoutofrangetype);
1274 axgbe_printf(1, "rmon rxpauseframes (%08x) = %08lx\n",
1275 MMC_RXPAUSEFRAMES_LO, stats->rxpauseframes);
1276 axgbe_printf(1, "rmon rxfifooverflow (%08x) = %08lx\n",
1277 MMC_RXFIFOOVERFLOW_LO, stats->rxfifooverflow);
1278 axgbe_printf(1, "rmon rxvlanframes_gb (%08x) = %08lx\n",
1279 MMC_RXVLANFRAMES_GB_LO, stats->rxvlanframes_gb);
1280 axgbe_printf(1, "rmon rxwatchdogerror (%08x) = %08lx\n",
1281 MMC_RXWATCHDOGERROR, stats->rxwatchdogerror);
1282 }
1283
1284 void
1285 xgbe_dump_i2c_registers(struct xgbe_prv_data *pdata)
1286 {
1287 axgbe_printf(1, "*************** I2C Registers **************\n");
1288 axgbe_printf(1, " IC_CON : %010x\n",
1289 XI2C_IOREAD(pdata, 0x00));
1290 axgbe_printf(1, " IC_TAR : %010x\n",
1291 XI2C_IOREAD(pdata, 0x04));
1292 axgbe_printf(1, " IC_HS_MADDR : %010x\n",
1293 XI2C_IOREAD(pdata, 0x0c));
1294 axgbe_printf(1, " IC_INTR_STAT : %010x\n",
1295 XI2C_IOREAD(pdata, 0x2c));
1296 axgbe_printf(1, " IC_INTR_MASK : %010x\n",
1297 XI2C_IOREAD(pdata, 0x30));
1298 axgbe_printf(1, " IC_RAW_INTR_STAT : %010x\n",
1299 XI2C_IOREAD(pdata, 0x34));
1300 axgbe_printf(1, " IC_RX_TL : %010x\n",
1301 XI2C_IOREAD(pdata, 0x38));
1302 axgbe_printf(1, " IC_TX_TL : %010x\n",
1303 XI2C_IOREAD(pdata, 0x3c));
1304 axgbe_printf(1, " IC_ENABLE : %010x\n",
1305 XI2C_IOREAD(pdata, 0x6c));
1306 axgbe_printf(1, " IC_STATUS : %010x\n",
1307 XI2C_IOREAD(pdata, 0x70));
1308 axgbe_printf(1, " IC_TXFLR : %010x\n",
1309 XI2C_IOREAD(pdata, 0x74));
1310 axgbe_printf(1, " IC_RXFLR : %010x\n",
1311 XI2C_IOREAD(pdata, 0x78));
1312 axgbe_printf(1, " IC_ENABLE_STATUS : %010x\n",
1313 XI2C_IOREAD(pdata, 0x9c));
1314 axgbe_printf(1, " IC_COMP_PARAM1 : %010x\n",
1315 XI2C_IOREAD(pdata, 0xf4));
1316 }
1317
1318 static void
1319 xgbe_dump_active_vlans(struct xgbe_prv_data *pdata)
1320 {
1321 int i;
1322
1323 for(i=0 ; i<BITS_TO_LONGS(VLAN_NVID); i++) {
1324 if (i && (i%8 == 0))
1325 axgbe_printf(1, "\n");
1326 axgbe_printf(1, "vlans[%d]: 0x%08lx ", i, pdata->active_vlans[i]);
1327 }
1328 axgbe_printf(1, "\n");
1329 }
1330
1331 static void
1332 xgbe_default_config(struct xgbe_prv_data *pdata)
1333 {
1334 pdata->blen = DMA_SBMR_BLEN_64;
1335 pdata->pbl = DMA_PBL_128;
1336 pdata->aal = 1;
1337 pdata->rd_osr_limit = 8;
1338 pdata->wr_osr_limit = 8;
1339 pdata->tx_sf_mode = MTL_TSF_ENABLE;
1340 pdata->tx_threshold = MTL_TX_THRESHOLD_64;
1341 pdata->tx_osp_mode = DMA_OSP_ENABLE;
1342 pdata->rx_sf_mode = MTL_RSF_DISABLE;
1343 pdata->rx_threshold = MTL_RX_THRESHOLD_64;
1344 pdata->pause_autoneg = 1;
1345 pdata->tx_pause = 1;
1346 pdata->rx_pause = 1;
1347 pdata->phy_speed = SPEED_UNKNOWN;
1348 pdata->power_down = 0;
1349 pdata->enable_rss = 1;
1350 }
1351
1352 static int
1353 axgbe_if_attach_post(if_ctx_t ctx)
1354 {
1355 struct axgbe_if_softc *sc = iflib_get_softc(ctx);
1356 struct xgbe_prv_data *pdata = &sc->pdata;
1357 if_t ifp = pdata->netdev;
1358 struct xgbe_phy_if *phy_if = &pdata->phy_if;
1359 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1360 if_softc_ctx_t scctx = sc->scctx;
1361 int i, ret;
1362
1363 /* set split header support based on tunable */
1364 pdata->sph_enable = axgbe_sph_enable;
1365
1366 /* Initialize ECC timestamps */
1367 pdata->tx_sec_period = ticks;
1368 pdata->tx_ded_period = ticks;
1369 pdata->rx_sec_period = ticks;
1370 pdata->rx_ded_period = ticks;
1371 pdata->desc_sec_period = ticks;
1372 pdata->desc_ded_period = ticks;
1373
1374 /* Reset the hardware */
1375 ret = hw_if->exit(&sc->pdata);
1376 if (ret)
1377 axgbe_error("%s: exit error %d\n", __func__, ret);
1378
1379 /* Configure the defaults */
1380 xgbe_default_config(pdata);
1381
1382 /* Set default max values if not provided */
1383 if (!pdata->tx_max_fifo_size)
1384 pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
1385 if (!pdata->rx_max_fifo_size)
1386 pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
1387
1388 DBGPR("%s: tx fifo 0x%x rx fifo 0x%x\n", __func__,
1389 pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);
1390
1391 /* Set and validate the number of descriptors for a ring */
1392 MPASS(powerof2(XGBE_TX_DESC_CNT));
1393 pdata->tx_desc_count = XGBE_TX_DESC_CNT;
1394 MPASS(powerof2(XGBE_RX_DESC_CNT));
1395 pdata->rx_desc_count = XGBE_RX_DESC_CNT;
1396
1397 /* Adjust the number of queues based on interrupts assigned */
1398 if (pdata->channel_irq_count) {
1399 pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
1400 pdata->channel_irq_count);
1401 pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
1402 pdata->channel_irq_count);
1403
1404 DBGPR("adjusted TX %u/%u RX %u/%u\n",
1405 pdata->tx_ring_count, pdata->tx_q_count,
1406 pdata->rx_ring_count, pdata->rx_q_count);
1407 }
1408
1409 /* Set channel count based on interrupts assigned */
1410 pdata->channel_count = max_t(unsigned int, scctx->isc_ntxqsets,
1411 scctx->isc_nrxqsets);
1412 DBGPR("Channel count set to: %u\n", pdata->channel_count);
1413
1414 /* Get RSS key */
1415 #ifdef RSS
1416 rss_getkey((uint8_t *)pdata->rss_key);
1417 #else
1418 arc4rand(&pdata->rss_key, ARRAY_SIZE(pdata->rss_key), 0);
1419 #endif
1420 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
1421 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
1422 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
1423
1424 /* Initialize the PHY device */
1425 pdata->sysctl_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
1426 phy_if->phy_init(pdata);
1427
1428 /* Set the coalescing */
1429 xgbe_init_rx_coalesce(&sc->pdata);
1430 xgbe_init_tx_coalesce(&sc->pdata);
1431
1432 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1433 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1434 ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
1435 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1436 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1437 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
1438 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1439 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1440 ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SGMII, 0, NULL);
1441 ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1442 ifmedia_add(sc->media, IFM_ETHER | IFM_100_SGMII, 0, NULL);
1443 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1444 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1445
1446 /* Initialize the phy */
1447 pdata->phy_link = -1;
1448 pdata->phy_speed = SPEED_UNKNOWN;
1449 ret = phy_if->phy_reset(pdata);
1450 if (ret)
1451 return (ret);
1452
1453 /* Calculate the Rx buffer size before allocating rings */
1454 ret = xgbe_calc_rx_buf_size(pdata->netdev, if_getmtu(pdata->netdev));
1455 pdata->rx_buf_size = ret;
1456 DBGPR("%s: rx_buf_size %d\n", __func__, ret);
1457
1458 /* Setup RSS lookup table */
1459 for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
1460 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
1461 i % pdata->rx_ring_count);
1462
1463 /*
1464 * Mark the device down until it is initialized, which happens
1465 * when the device is accessed first (for configuring the iface,
1466 * eg: setting IP)
1467 */
1468 set_bit(XGBE_DOWN, &pdata->dev_state);
1469
1470 DBGPR("mtu %d\n", if_getmtu(ifp));
1471 scctx->isc_max_frame_size = if_getmtu(ifp) + 18;
1472 scctx->isc_min_frame_size = XGMAC_MIN_PACKET;
1473
1474 axgbe_sysctl_init(pdata);
1475
1476 axgbe_pci_init(pdata);
1477
1478 return (0);
1479 } /* axgbe_if_attach_post */
1480
1481 static void
1482 xgbe_free_intr(struct xgbe_prv_data *pdata, struct resource *res, void *tag,
1483 int rid)
1484 {
1485 if (tag)
1486 bus_teardown_intr(pdata->dev, res, tag);
1487
1488 if (res)
1489 bus_release_resource(pdata->dev, SYS_RES_IRQ, rid, res);
1490 }
1491
static void
axgbe_interrupts_free(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_softc_ctx_t scctx = sc->scctx;
	struct xgbe_channel *channel;
	struct if_irq irq;
	int i;

	axgbe_printf(2, "%s: mode %d\n", __func__, scctx->isc_intr);

	/* Free dev_irq */
	iflib_irq_free(ctx, &pdata->dev_irq);

	/* Free ecc_irq */
	xgbe_free_intr(pdata, pdata->ecc_irq_res, pdata->ecc_irq_tag,
	    pdata->ecc_rid);

	/* Free i2c_irq */
	xgbe_free_intr(pdata, pdata->i2c_irq_res, pdata->i2c_irq_tag,
	    pdata->i2c_rid);

	/* Free an_irq */
	xgbe_free_intr(pdata, pdata->an_irq_res, pdata->an_irq_tag,
	    pdata->an_rid);

	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		channel = pdata->channel[i];
		axgbe_printf(2, "%s: rid %d\n", __func__, channel->dma_irq_rid);
		irq.ii_res = channel->dma_irq_res;
		irq.ii_tag = channel->dma_irq_tag;
		iflib_irq_free(ctx, &irq);
	}
}

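/*
 * iflib detach entry point: shut down the PHY, free all interrupts and
 * the driver taskqueue, release the MAC register resources, and free
 * the VLAN bitmap and sysctl state.
 */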
static int
axgbe_if_detach(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct resource *mac_res[2];

	mac_res[0] = pdata->xgmac_res;
	mac_res[1] = pdata->xpcs_res;

	phy_if->phy_exit(pdata);

	/* Free Interrupts */
	axgbe_interrupts_free(ctx);

	/* Free workqueues */
	taskqueue_free(pdata->dev_workqueue);

	/* Release bus resources */
	bus_release_resources(iflib_get_dev(ctx), axgbe_pci_mac_spec, mac_res);

	/* Free VLAN bitmap */
	free(pdata->active_vlans, M_AXGBE);

	axgbe_sysctl_exit(pdata);

	return (0);
} /* axgbe_if_detach */

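/*
 * Bring the device up: initialize the hardware, start the PHY, enable
 * the Tx/Rx paths and the periodic timers, then clear XGBE_DOWN. A
 * no-op unless the device is currently marked down. The trailing
 * register dumps are debug aids gated by the driver's debug level.
 */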
static void
axgbe_pci_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	int ret = 0;

	if (!__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) {
		axgbe_printf(1, "%s: Starting when XGBE_UP\n", __func__);
		return;
	}

	hw_if->init(pdata);

	ret = phy_if->phy_start(pdata);
	if (ret) {
		axgbe_error("%s: phy start %d\n", __func__, ret);
		ret = hw_if->exit(pdata);
		if (ret)
			axgbe_error("%s: exit error %d\n", __func__, ret);
		return;
	}

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	xgbe_start_timers(pdata);

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	xgbe_dump_phy_registers(pdata);
	xgbe_dump_prop_registers(pdata);
	xgbe_dump_dma_registers(pdata, -1);
	xgbe_dump_mtl_registers(pdata);
	xgbe_dump_mac_registers(pdata);
	xgbe_dump_rmon_counters(pdata);
}

static void
axgbe_if_init(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;

	axgbe_pci_init(pdata);
}

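/*
 * Bring the device down: stop the timers, drain pending taskqueue
 * work, disable the Tx/Rx paths, stop the PHY, and tear down the
 * hardware before marking the device XGBE_DOWN. A no-op if the device
 * is already down.
 */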
static void
axgbe_pci_stop(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	int ret;

	if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) {
		axgbe_printf(1, "%s: Stopping when XGBE_DOWN\n", __func__);
		return;
	}

	xgbe_stop_timers(pdata);
	taskqueue_drain_all(pdata->dev_workqueue);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	phy_if->phy_stop(pdata);

	ret = hw_if->exit(pdata);
	if (ret)
		axgbe_error("%s: exit error %d\n", __func__, ret);

	set_bit(XGBE_DOWN, &pdata->dev_state);
}

static void
axgbe_if_stop(if_ctx_t ctx)
{
	axgbe_pci_stop(ctx);
}

static void
axgbe_if_disable_intr(if_ctx_t ctx)
{
	/* TODO - implement */
}

static void
axgbe_if_enable_intr(if_ctx_t ctx)
{
	/* TODO - implement */
}

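/*
 * iflib Tx queue allocation: for each queue set, attach a software ring
 * to the corresponding DMA channel and record the iflib-provided
 * descriptor virtual/physical addresses. On failure, unwind whatever
 * was allocated for earlier queue sets and return ENOMEM.
 */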
static int
axgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int ntxqs,
    int ntxqsets)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_softc_ctx_t scctx = sc->scctx;
	struct xgbe_channel *channel;
	struct xgbe_ring *tx_ring;
	int i, j, k;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(scctx->isc_ntxqsets == ntxqsets);
	MPASS(ntxqs == 1);

	axgbe_printf(1, "%s: txqsets %d/%d txqs %d\n", __func__,
	    scctx->isc_ntxqsets, ntxqsets, ntxqs);

	for (i = 0; i < ntxqsets; i++) {
		channel = pdata->channel[i];

		tx_ring = (struct xgbe_ring *)malloc(ntxqs *
		    sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO);
		if (tx_ring == NULL) {
			axgbe_error("Unable to allocate TX ring memory\n");
			goto tx_ring_fail;
		}

		channel->tx_ring = tx_ring;

		for (j = 0; j < ntxqs; j++, tx_ring++) {
			tx_ring->rdata =
			    (struct xgbe_ring_data *)malloc(scctx->isc_ntxd[j] *
			    sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT);

			/* Get the virtual & physical address of hw queues */
			tx_ring->rdesc = (struct xgbe_ring_desc *)va[i * ntxqs + j];
			tx_ring->rdesc_paddr = pa[i * ntxqs + j];
			tx_ring->rdesc_count = scctx->isc_ntxd[j];
			spin_lock_init(&tx_ring->lock);
		}
	}

	axgbe_printf(1, "allocated for %d tx queues\n", scctx->isc_ntxqsets);

	return (0);

tx_ring_fail:
	for (j = 0; j < i; j++) {
		channel = pdata->channel[j];

		tx_ring = channel->tx_ring;
		for (k = 0; k < ntxqs; k++, tx_ring++) {
			if (tx_ring && tx_ring->rdata)
				free(tx_ring->rdata, M_AXGBE);
		}
		free(channel->tx_ring, M_AXGBE);
		channel->tx_ring = NULL;
	}

	return (ENOMEM);
} /* axgbe_if_tx_queues_alloc */

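/*
 * iflib Rx queue allocation: mirrors the Tx path, except that each
 * queue set carries two hardware rings instead of one when split
 * header (sph_enable) support is active.
 */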
static int
axgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int nrxqs,
    int nrxqsets)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_softc_ctx_t scctx = sc->scctx;
	struct xgbe_channel *channel;
	struct xgbe_ring *rx_ring;
	int i, j, k;

	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
	if (!pdata->sph_enable) {
		MPASS(nrxqs == 1);
	} else {
		MPASS(nrxqs == 2);
	}

	axgbe_printf(1, "%s: rxqsets %d/%d rxqs %d\n", __func__,
	    scctx->isc_nrxqsets, nrxqsets, nrxqs);

	for (i = 0; i < nrxqsets; i++) {
		channel = pdata->channel[i];

		rx_ring = (struct xgbe_ring *)malloc(nrxqs *
		    sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO);
		if (rx_ring == NULL) {
			axgbe_error("Unable to allocate RX ring memory\n");
			goto rx_ring_fail;
		}

		channel->rx_ring = rx_ring;

		for (j = 0; j < nrxqs; j++, rx_ring++) {
			rx_ring->rdata =
			    (struct xgbe_ring_data *)malloc(scctx->isc_nrxd[j] *
			    sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT);

			/* Get the virtual and physical address of the hw queues */
			rx_ring->rdesc = (struct xgbe_ring_desc *)va[i * nrxqs + j];
			rx_ring->rdesc_paddr = pa[i * nrxqs + j];
			rx_ring->rdesc_count = scctx->isc_nrxd[j];
			spin_lock_init(&rx_ring->lock);
		}
	}

	axgbe_printf(2, "allocated for %d rx queues\n", scctx->isc_nrxqsets);

	return (0);

rx_ring_fail:
	for (j = 0; j < i; j++) {
		channel = pdata->channel[j];

		rx_ring = channel->rx_ring;
		for (k = 0; k < nrxqs; k++, rx_ring++) {
			if (rx_ring && rx_ring->rdata)
				free(rx_ring->rdata, M_AXGBE);
		}
		free(channel->rx_ring, M_AXGBE);
		channel->rx_ring = NULL;
	}

	return (ENOMEM);
} /* axgbe_if_rx_queues_alloc */

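/*
 * Free the per-channel Tx and Rx software rings allocated above, then
 * release the channel structures themselves.
 */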
static void
axgbe_if_queues_free(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_softc_ctx_t scctx = sc->scctx;
	if_shared_ctx_t sctx = sc->sctx;
	struct xgbe_channel *channel;
	struct xgbe_ring *tx_ring;
	struct xgbe_ring *rx_ring;
	int i, j;

	for (i = 0; i < scctx->isc_ntxqsets; i++) {
		channel = pdata->channel[i];

		tx_ring = channel->tx_ring;
		for (j = 0; j < sctx->isc_ntxqs; j++, tx_ring++) {
			if (tx_ring && tx_ring->rdata)
				free(tx_ring->rdata, M_AXGBE);
		}
		free(channel->tx_ring, M_AXGBE);
		channel->tx_ring = NULL;
	}

	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		channel = pdata->channel[i];

		rx_ring = channel->rx_ring;
		for (j = 0; j < sctx->isc_nrxqs; j++, rx_ring++) {
			if (rx_ring && rx_ring->rdata)
				free(rx_ring->rdata, M_AXGBE);
		}
		free(channel->rx_ring, M_AXGBE);
		channel->rx_ring = NULL;
	}

	axgbe_free_channels(sc);
} /* axgbe_if_queues_free */

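/*
 * Record a newly registered VLAN tag in the active-VLAN bitmap and
 * reprogram the hardware VLAN hash table to match.
 */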
static void
axgbe_if_vlan_register(if_ctx_t ctx, uint16_t vtag)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	if (!bit_test(pdata->active_vlans, vtag)) {
		axgbe_printf(0, "Registering VLAN %d\n", vtag);

		bit_set(pdata->active_vlans, vtag);
		hw_if->update_vlan_hash_table(pdata);
		pdata->num_active_vlans++;

		axgbe_printf(1, "Total active vlans: %d\n",
		    pdata->num_active_vlans);
	} else
		axgbe_printf(0, "VLAN %d already registered\n", vtag);

	xgbe_dump_active_vlans(pdata);
}

static void
axgbe_if_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	if (pdata->num_active_vlans == 0) {
		axgbe_printf(1, "No active VLANs to unregister\n");
		return;
	}

	if (bit_test(pdata->active_vlans, vtag)) {
		axgbe_printf(0, "Unregistering VLAN %d\n", vtag);

		bit_clear(pdata->active_vlans, vtag);
		hw_if->update_vlan_hash_table(pdata);
		pdata->num_active_vlans--;

		axgbe_printf(1, "Total active vlans: %d\n",
		    pdata->num_active_vlans);
	} else
		axgbe_printf(0, "VLAN %d already unregistered\n", vtag);

	xgbe_dump_active_vlans(pdata);
}

#if __FreeBSD_version >= 1300000
static bool
axgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (true);
	}
}
#endif

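/*
 * Assign MSI-X vectors: one vector for the admin/device interrupt
 * (managed by iflib), three more for the ECC, I2C and auto-negotiation
 * interrupts (set up directly on the bus), and the remainder as
 * per-channel DMA interrupts shared by the Rx/Tx fast path. Plain MSI
 * collapses everything onto a single vector.
 */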
static int
axgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_softc_ctx_t scctx = sc->scctx;
	struct xgbe_channel *channel;
	struct if_irq irq;
	int i, error, rid = 0, flags;
	char buf[16];

	MPASS(scctx->isc_intr != IFLIB_INTR_LEGACY);

	pdata->isr_as_tasklet = 1;

	if (scctx->isc_intr == IFLIB_INTR_MSI) {
		pdata->irq_count = 1;
		pdata->channel_irq_count = 1;
		return (0);
	}

	axgbe_printf(1, "%s: msix %d txqsets %d rxqsets %d\n", __func__, msix,
	    scctx->isc_ntxqsets, scctx->isc_nrxqsets);

	flags = RF_ACTIVE;

	/* DEV INTR SETUP */
	rid++;
	error = iflib_irq_alloc_generic(ctx, &pdata->dev_irq, rid,
	    IFLIB_INTR_ADMIN, axgbe_dev_isr, sc, 0, "dev_irq");
	if (error) {
		axgbe_error("Failed to register device interrupt rid %d name %s\n",
		    rid, "dev_irq");
		return (error);
	}

	/* ECC INTR SETUP */
	rid++;
	pdata->ecc_rid = rid;
	pdata->ecc_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ,
	    &rid, flags);
	if (!pdata->ecc_irq_res) {
		axgbe_error("failed to allocate IRQ for rid %d, name %s.\n",
		    rid, "ecc_irq");
		return (ENOMEM);
	}

	error = bus_setup_intr(pdata->dev, pdata->ecc_irq_res, INTR_MPSAFE |
	    INTR_TYPE_NET, NULL, axgbe_ecc_isr, sc, &pdata->ecc_irq_tag);
	if (error) {
		axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, "ecc_irq", error);
		return (error);
	}

	/* I2C INTR SETUP */
	rid++;
	pdata->i2c_rid = rid;
	pdata->i2c_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ,
	    &rid, flags);
	if (!pdata->i2c_irq_res) {
		axgbe_error("failed to allocate IRQ for rid %d, name %s.\n",
		    rid, "i2c_irq");
		return (ENOMEM);
	}

	error = bus_setup_intr(pdata->dev, pdata->i2c_irq_res, INTR_MPSAFE |
	    INTR_TYPE_NET, NULL, axgbe_i2c_isr, sc, &pdata->i2c_irq_tag);
	if (error) {
		axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, "i2c_irq", error);
		return (error);
	}

	/* AN INTR SETUP */
	rid++;
	pdata->an_rid = rid;
	pdata->an_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ,
	    &rid, flags);
	if (!pdata->an_irq_res) {
		axgbe_error("failed to allocate IRQ for rid %d, name %s.\n",
		    rid, "an_irq");
		return (ENOMEM);
	}

	error = bus_setup_intr(pdata->dev, pdata->an_irq_res, INTR_MPSAFE |
	    INTR_TYPE_NET, NULL, axgbe_an_isr, sc, &pdata->an_irq_tag);
	if (error) {
		axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, "an_irq", error);
		return (error);
	}

	pdata->per_channel_irq = 1;
	pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
	rid++;
	for (i = 0; i < scctx->isc_nrxqsets; i++, rid++) {
		channel = pdata->channel[i];

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &irq, rid, IFLIB_INTR_RXTX,
		    axgbe_msix_que, channel, channel->queue_index, buf);
		if (error) {
			axgbe_error("Failed to allocate queue interrupt %d, error: %d\n",
			    i, error);
			return (error);
		}

		channel->dma_irq_rid = rid;
		channel->dma_irq_res = irq.ii_res;
		channel->dma_irq_tag = irq.ii_tag;
		axgbe_printf(1, "%s: channel count %d idx %d irq %d\n",
		    __func__, scctx->isc_nrxqsets, i, rid);
	}
	pdata->irq_count = msix;
	pdata->channel_irq_count = scctx->isc_nrxqsets;

	for (i = 0; i < scctx->isc_ntxqsets; i++) {
		channel = pdata->channel[i];

		snprintf(buf, sizeof(buf), "txq%d", i);
		irq.ii_res = channel->dma_irq_res;
		iflib_softirq_alloc_generic(ctx, &irq, IFLIB_INTR_TX, channel,
		    channel->queue_index, buf);
	}

	return (0);
} /* axgbe_if_msix_intr_assign */

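/*
 * Enable the per-channel DMA interrupt appropriate for the rings the
 * channel owns: TI+RI when it has both a Tx and an Rx ring, TI or RI
 * alone otherwise. Returns -1 if the channel has no rings at all.
 */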
static int
xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return (-1);

	axgbe_printf(1, "%s channel: %d rx_tx interrupt enabled %d\n",
	    __func__, channel->queue_index, int_id);
	return (hw_if->enable_int(channel, int_id));
}

static void
xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	axgbe_printf(1, "%s channel: %d rx_tx interrupt disabled %d\n",
	    __func__, channel->queue_index, int_id);
	hw_if->disable_int(channel, int_id);
}

static void
xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}

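/*
 * Fast interrupt filter for a per-channel DMA vector: mask the
 * channel's Tx/Rx interrupts, acknowledge the TI/RI status bits, and
 * hand the remaining work to the iflib interrupt thread.
 */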
static int
axgbe_msix_que(void *arg)
{
	struct xgbe_channel *channel = (struct xgbe_channel *)arg;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	axgbe_printf(1, "%s: Channel: %d SR 0x%04x DSR 0x%04x IER:0x%04x D_ISR:0x%04x M_ISR:0x%04x\n",
	    __func__, channel->queue_index,
	    XGMAC_DMA_IOREAD(channel, DMA_CH_SR),
	    XGMAC_DMA_IOREAD(channel, DMA_CH_DSR),
	    XGMAC_DMA_IOREAD(channel, DMA_CH_IER),
	    XGMAC_IOREAD(pdata, DMA_ISR),
	    XGMAC_IOREAD(pdata, MAC_ISR));

	(void)XGMAC_DMA_IOREAD(channel, DMA_CH_SR);

	/* Disable Tx and Rx channel interrupts */
	xgbe_disable_rx_tx_int(pdata, channel);

	/* Clear the interrupts */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return (FILTER_SCHEDULE_THREAD);
}

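/*
 * Admin (device) interrupt filter: walk the DMA interrupt status and
 * service each signaling channel, count Rx buffer-unavailable events,
 * report fatal bus errors, and dispatch MAC-level MMC and MDIO
 * completion interrupts.
 */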
static int
axgbe_dev_isr(void *arg)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg;
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_channel *channel;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int i, dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_mdioisr;
	int ret = FILTER_HANDLED;

	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	axgbe_printf(2, "%s DMA ISR: 0x%x\n", __func__, dma_isr);

	if (!dma_isr)
		return (FILTER_HANDLED);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel[i];

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		axgbe_printf(2, "%s: channel %d SR 0x%x DSR 0x%x\n", __func__,
		    channel->queue_index, dma_ch_isr, XGMAC_DMA_IOREAD(channel,
		    DMA_CH_DSR));

		/*
		 * The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before servicing them here.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			/* Disable Tx and Rx interrupts */
			xgbe_disable_rx_tx_ints(pdata);
		} else {
			/*
			 * Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts; these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Report a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			axgbe_error("%s: Fatal bus error reported 0x%x\n",
			    __func__, dma_ch_isr);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

		ret = FILTER_SCHEDULE_THREAD;
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
		axgbe_printf(2, "%s MAC ISR: 0x%x\n", __func__, mac_isr);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
			    SNGLCOMPINT))
				wakeup_one(pdata);
		}
	}

	return (ret);
} /* axgbe_dev_isr */

static void
axgbe_i2c_isr(void *arg)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg;

	sc->pdata.i2c_if.i2c_isr(&sc->pdata);
}

static void
axgbe_ecc_isr(void *arg)
{
	/* TODO - implement */
}

static void
axgbe_an_isr(void *arg)
{
	struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg;

	sc->pdata.phy_if.an_isr(&sc->pdata);
}

static int
axgbe_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	int ret;

	if (qid < pdata->tx_q_count) {
		ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]);
		if (ret) {
			axgbe_error("Enable TX INT failed\n");
			return (ret);
		}
	} else
		axgbe_error("Queue ID exceeds channel count\n");

	return (0);
}

static int
axgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	int ret;

	if (qid < pdata->rx_q_count) {
		ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]);
		if (ret) {
			axgbe_error("Enable RX INT failed\n");
			return (ret);
		}
	} else
		axgbe_error("Queue ID exceeds channel count\n");

	return (0);
}

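/*
 * Push the PHY link state into iflib: report link up with the
 * negotiated speed on a down-to-up transition, link down on the
 * reverse, and do nothing while the state is still unknown (-1).
 */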
static void
axgbe_if_update_admin_status(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;

	axgbe_printf(1, "%s: phy_link %d status %d speed %d\n", __func__,
	    pdata->phy_link, sc->link_status, pdata->phy.speed);

	if (pdata->phy_link < 0)
		return;

	if (pdata->phy_link) {
		if (sc->link_status == LINK_STATE_DOWN) {
			sc->link_status = LINK_STATE_UP;
			if (pdata->phy.speed & SPEED_10000)
				iflib_link_state_change(ctx, LINK_STATE_UP,
				    IF_Gbps(10));
			else if (pdata->phy.speed & SPEED_2500)
				iflib_link_state_change(ctx, LINK_STATE_UP,
				    IF_Gbps(2.5));
			else if (pdata->phy.speed & SPEED_1000)
				iflib_link_state_change(ctx, LINK_STATE_UP,
				    IF_Gbps(1));
			else if (pdata->phy.speed & SPEED_100)
				iflib_link_state_change(ctx, LINK_STATE_UP,
				    IF_Mbps(100));
			else if (pdata->phy.speed & SPEED_10)
				iflib_link_state_change(ctx, LINK_STATE_UP,
				    IF_Mbps(10));
		}
	} else {
		if (sc->link_status == LINK_STATE_UP) {
			sc->link_status = LINK_STATE_DOWN;
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
		}
	}
}

static int
axgbe_if_media_change(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	sx_xlock(&sc->pdata.an_mutex);
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		/* Don't return with the AN lock still held. */
		sx_xunlock(&sc->pdata.an_mutex);
		return (EINVAL);
	}

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_10G_KR:
		sc->pdata.phy.speed = SPEED_10000;
		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_2500_KX:
		sc->pdata.phy.speed = SPEED_2500;
		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_1000_KX:
		sc->pdata.phy.speed = SPEED_1000;
		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_100_TX:
		sc->pdata.phy.speed = SPEED_100;
		sc->pdata.phy.autoneg = AUTONEG_DISABLE;
		break;
	case IFM_AUTO:
		sc->pdata.phy.autoneg = AUTONEG_ENABLE;
		break;
	}
	sx_xunlock(&sc->pdata.an_mutex);

	return (-sc->pdata.phy_if.phy_config_aneg(&sc->pdata));
}

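/*
 * Toggle hardware promiscuous mode to follow IFF_PPROMISC: setting the
 * MAC_PFR PR bit accepts all frames; VLAN tag filtering (VTFE) is
 * disabled while promiscuous and re-enabled on the way out.
 */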
static int
axgbe_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	if_t ifp = pdata->netdev;

	axgbe_printf(1, "%s: MAC_PFR 0x%x drv_flags 0x%x if_flags 0x%x\n",
	    __func__, XGMAC_IOREAD(pdata, MAC_PFR), if_getdrvflags(ifp),
	    if_getflags(ifp));

	if (if_getflags(ifp) & IFF_PPROMISC) {
		axgbe_printf(1, "User requested to enter promisc mode\n");

		if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == 1) {
			axgbe_printf(1, "Already in promisc mode\n");
			return (0);
		}

		axgbe_printf(1, "Entering promisc mode\n");
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
	} else {
		axgbe_printf(1, "User requested to leave promisc mode\n");

		if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == 0) {
			axgbe_printf(1, "Already not in promisc mode\n");
			return (0);
		}

		axgbe_printf(1, "Leaving promisc mode\n");
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
	}

	return (0);
}

static uint64_t
axgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	pdata->hw_if.read_mmc_stats(pdata);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (pstats->rxframecount_gb);
	case IFCOUNTER_IERRORS:
		return (pstats->rxframecount_gb - pstats->rxbroadcastframes_g -
		    pstats->rxmulticastframes_g - pstats->rxunicastframes_g);
	case IFCOUNTER_OPACKETS:
		return (pstats->txframecount_gb);
	case IFCOUNTER_OERRORS:
		return (pstats->txframecount_gb - pstats->txframecount_g);
	case IFCOUNTER_IBYTES:
		return (pstats->rxoctetcount_gb);
	case IFCOUNTER_OBYTES:
		return (pstats->txoctetcount_gb);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

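/*
 * Validate and apply a new MTU: reject anything above the jumbo packet
 * limit, recompute the Rx buffer size, and update the iflib maximum
 * frame size to include the Ethernet header and CRC.
 */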
static int
axgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	int ret;

	if (mtu > XGMAC_JUMBO_PACKET_MTU)
		return (EINVAL);

	ret = xgbe_calc_rx_buf_size(pdata->netdev, mtu);
	pdata->rx_buf_size = ret;
	axgbe_printf(1, "%s: rx_buf_size %d\n", __func__, ret);

	sc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	return (0);
}

static void
axgbe_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;

	ifmr->ifm_status = IFM_AVALID;
	if (!sc->pdata.phy.link)
		return;

	ifmr->ifm_active = IFM_ETHER;
	ifmr->ifm_status |= IFM_ACTIVE;

	axgbe_printf(1, "Speed 0x%x Mode %d\n", sc->pdata.phy.speed,
	    pdata->phy_if.phy_impl.cur_mode(pdata));
	pdata->phy_if.phy_impl.get_type(pdata, ifmr);

	ifmr->ifm_active |= IFM_FDX;
	ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}