1 /*-
2 * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
3 * All rights reserved.
4 *
5 * Developed by Semihalf.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/bus.h>
35 #include <sys/kernel.h>
36 #include <sys/kthread.h>
37 #include <sys/lock.h>
38 #include <sys/mbuf.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/rman.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 #include <sys/taskqueue.h>
46
47 #include <machine/atomic.h>
48
49 #include "opt_inet.h"
50 #include "opt_inet6.h"
51
52 #include <net/ethernet.h>
53 #include <net/if.h>
54 #include <net/if_var.h>
55 #include <net/if_arp.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58 #include <net/if_types.h>
59 #include <netinet/in.h>
60 #include <net/if_vlan_var.h>
61 #include <netinet/tcp.h>
62 #include <netinet/tcp_lro.h>
63
64 #ifdef INET
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/ip.h>
69 #endif
70
71 #ifdef INET6
72 #include <netinet/ip6.h>
73 #endif
74
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcivar.h>
79
80 #include <dev/mii/mii.h>
81 #include <dev/mii/miivar.h>
82
83 #include <al_hal_common.h>
84 #include <al_hal_plat_services.h>
85 #include <al_hal_udma_config.h>
86 #include <al_hal_udma_iofic.h>
87 #include <al_hal_udma_debug.h>
88 #include <al_hal_eth.h>
89
90 #include "al_eth.h"
91 #include "al_init_eth_lm.h"
92 #include "arm/annapurna/alpine/alpine_serdes.h"
93
94 #include "miibus_if.h"
95
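/*
 * Debug print helper: forwards to device_printf() only when the HAL debug
 * level is at least AL_DBG_LEVEL_DBG, serialized by the HAL debug lock.
 */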
#define device_printf_dbg(dev, ...) do {				\
	if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK();		\
	device_printf(dev, __VA_ARGS__); AL_DBG_UNLOCK();}		\
} while (0)
100
101 MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");
102
103 /* move out to some pci header file */
104 #define PCI_VENDOR_ID_ANNAPURNA_LABS 0x1c36
105 #define PCI_DEVICE_ID_AL_ETH 0x0001
106 #define PCI_DEVICE_ID_AL_ETH_ADVANCED 0x0002
107 #define PCI_DEVICE_ID_AL_ETH_NIC 0x0003
108 #define PCI_DEVICE_ID_AL_ETH_FPGA_NIC 0x0030
109 #define PCI_DEVICE_ID_AL_CRYPTO 0x0011
110 #define PCI_DEVICE_ID_AL_CRYPTO_VF 0x8011
111 #define PCI_DEVICE_ID_AL_RAID_DMA 0x0021
112 #define PCI_DEVICE_ID_AL_RAID_DMA_VF 0x8021
113 #define PCI_DEVICE_ID_AL_USB 0x0041
114
115 #define MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
116 #define MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]
117
118 #define AL_ETH_MAC_TABLE_UNICAST_IDX_BASE 0
119 #define AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT 4
120 #define AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
121 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)
122
123 #define AL_ETH_MAC_TABLE_DROP_IDX (AL_ETH_FWD_MAC_NUM - 1)
124 #define AL_ETH_MAC_TABLE_BROADCAST_IDX (AL_ETH_MAC_TABLE_DROP_IDX - 1)
125
126 #define AL_ETH_THASH_UDMA_SHIFT 0
127 #define AL_ETH_THASH_UDMA_MASK (0xF << AL_ETH_THASH_UDMA_SHIFT)
128
129 #define AL_ETH_THASH_Q_SHIFT 4
130 #define AL_ETH_THASH_Q_MASK (0x3 << AL_ETH_THASH_Q_SHIFT)
131
132 /* the following defines should be moved to hal */
133 #define AL_ETH_FSM_ENTRY_IPV4_TCP 0
134 #define AL_ETH_FSM_ENTRY_IPV4_UDP 1
135 #define AL_ETH_FSM_ENTRY_IPV6_TCP 2
136 #define AL_ETH_FSM_ENTRY_IPV6_UDP 3
137 #define AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP 4
138 #define AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP 5
139
140 /* FSM DATA format */
141 #define AL_ETH_FSM_DATA_OUTER_2_TUPLE 0
142 #define AL_ETH_FSM_DATA_OUTER_4_TUPLE 1
143 #define AL_ETH_FSM_DATA_INNER_2_TUPLE 2
144 #define AL_ETH_FSM_DATA_INNER_4_TUPLE 3
145
146 #define AL_ETH_FSM_DATA_HASH_SEL (1 << 2)
147
148 #define AL_ETH_FSM_DATA_DEFAULT_Q 0
149 #define AL_ETH_FSM_DATA_DEFAULT_UDMA 0
150
151 #define AL_BR_SIZE 512
152 #define AL_TSO_SIZE 65500
153 #define AL_DEFAULT_MTU 1500
154
155 #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
156
157 #define AL_IP_ALIGNMENT_OFFSET 2
158
159 #define SFP_I2C_ADDR 0x50
160
161 #define AL_MASK_GROUP_A_INT 0x7
162 #define AL_MASK_GROUP_B_INT 0xF
163 #define AL_MASK_GROUP_C_INT 0xF
164 #define AL_MASK_GROUP_D_INT 0xFFFFFFFF
165
166 #define AL_REG_OFFSET_FORWARD_INTR (0x1800000 + 0x1210)
167 #define AL_EN_FORWARD_INTR 0x1FFFF
168 #define AL_DIS_FORWARD_INTR 0
169
170 #define AL_M2S_MASK_INIT 0x480
171 #define AL_S2M_MASK_INIT 0x1E0
172 #define AL_M2S_S2M_MASK_NOT_INT (0x3f << 25)
173
174 #define AL_10BASE_T_SPEED 10
175 #define AL_100BASE_TX_SPEED 100
176 #define AL_1000BASE_T_SPEED 1000
177
178 #define AL_RX_LOCK_INIT(_sc) mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
179 #define AL_RX_LOCK(_sc) mtx_lock(&((_sc)->if_rx_lock))
180 #define AL_RX_UNLOCK(_sc) mtx_unlock(&((_sc)->if_rx_lock))
181
182 /* helper functions */
183 static int al_is_device_supported(device_t);
184
185 static void al_eth_init_rings(struct al_eth_adapter *);
186 static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
187 int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
188 int al_eth_fpga_write_pci_config(void *, int, uint32_t);
189 int al_eth_read_pci_config(void *, int, uint32_t *);
190 int al_eth_write_pci_config(void *, int, uint32_t);
191 void al_eth_irq_config(uint32_t *, uint32_t);
192 void al_eth_forward_int_config(uint32_t *, uint32_t);
193 static void al_eth_start_xmit(void *, int);
194 static void al_eth_rx_recv_work(void *, int);
195 static int al_eth_up(struct al_eth_adapter *);
196 static void al_eth_down(struct al_eth_adapter *);
197 static void al_eth_interrupts_unmask(struct al_eth_adapter *);
198 static void al_eth_interrupts_mask(struct al_eth_adapter *);
199 static int al_eth_check_mtu(struct al_eth_adapter *, int);
200 static uint64_t al_get_counter(struct ifnet *, ift_counter);
201 static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
202 static int al_eth_board_params_init(struct al_eth_adapter *);
203 static int al_media_update(struct ifnet *);
204 static void al_media_status(struct ifnet *, struct ifmediareq *);
205 static int al_eth_function_reset(struct al_eth_adapter *);
206 static int al_eth_hw_init_adapter(struct al_eth_adapter *);
207 static void al_eth_serdes_init(struct al_eth_adapter *);
208 static void al_eth_lm_config(struct al_eth_adapter *);
209 static int al_eth_hw_init(struct al_eth_adapter *);
210
211 static void al_tick_stats(void *);
212
213 /* ifnet entry points */
214 static void al_init(void *);
215 static int al_mq_start(struct ifnet *, struct mbuf *);
216 static void al_qflush(struct ifnet *);
217 static int al_ioctl(struct ifnet * ifp, u_long, caddr_t);
218
219 /* bus entry points */
220 static int al_probe(device_t);
221 static int al_attach(device_t);
222 static int al_detach(device_t);
223 static int al_shutdown(device_t);
224
225 /* mii bus support routines */
226 static int al_miibus_readreg(device_t, int, int);
227 static int al_miibus_writereg(device_t, int, int, int);
228 static void al_miibus_statchg(device_t);
229 static void al_miibus_linkchg(device_t);
230
struct al_eth_adapter *g_adapters[16];
232 uint32_t g_adapters_count;
233
234 /* flag for napi-like mbuf processing, controlled from sysctl */
235 static int napi = 0;
236
237 static device_method_t al_methods[] = {
238 /* Device interface */
239 DEVMETHOD(device_probe, al_probe),
240 DEVMETHOD(device_attach, al_attach),
241 DEVMETHOD(device_detach, al_detach),
242 DEVMETHOD(device_shutdown, al_shutdown),
243
244 DEVMETHOD(miibus_readreg, al_miibus_readreg),
245 DEVMETHOD(miibus_writereg, al_miibus_writereg),
246 DEVMETHOD(miibus_statchg, al_miibus_statchg),
247 DEVMETHOD(miibus_linkchg, al_miibus_linkchg),
248 { 0, 0 }
249 };
250
251 static driver_t al_driver = {
252 "al",
253 al_methods,
254 sizeof(struct al_eth_adapter),
255 };
256
257 DRIVER_MODULE(al, pci, al_driver, 0, 0);
258 DRIVER_MODULE(miibus, al, miibus_driver, 0, 0);
259
260 static int
261 al_probe(device_t dev)
262 {
	if (al_is_device_supported(dev) != 0) {
264 device_set_desc(dev, "al");
265 return (BUS_PROBE_DEFAULT);
266 }
267 return (ENXIO);
268 }
269
270 static int
271 al_attach(device_t dev)
272 {
273 struct al_eth_adapter *adapter;
274 struct sysctl_oid_list *child;
275 struct sysctl_ctx_list *ctx;
276 struct sysctl_oid *tree;
277 struct ifnet *ifp;
278 uint32_t dev_id;
279 uint32_t rev_id;
280 int bar_udma;
281 int bar_mac;
282 int bar_ec;
283 int err;
284
285 err = 0;
286 ifp = NULL;
287 dev_id = rev_id = 0;
288 ctx = device_get_sysctl_ctx(dev);
289 tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
290 child = SYSCTL_CHILDREN(tree);
291
292 if (g_adapters_count == 0) {
293 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi",
294 CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism");
295 }
296 adapter = device_get_softc(dev);
297 adapter->dev = dev;
298 adapter->board_type = ALPINE_INTEGRATED;
299 snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
300 device_get_nameunit(dev));
301 AL_RX_LOCK_INIT(adapter);
302
303 g_adapters[g_adapters_count] = adapter;
304
305 bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
306 adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
307 &bar_udma, RF_ACTIVE);
308 if (adapter->udma_res == NULL) {
309 device_printf(adapter->dev,
310 "could not allocate memory resources for DMA.\n");
311 err = ENOMEM;
312 goto err_res_dma;
313 }
314 adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
315 rman_get_bushandle(adapter->udma_res));
316 bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
317 adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
318 &bar_mac, RF_ACTIVE);
319 if (adapter->mac_res == NULL) {
320 device_printf(adapter->dev,
321 "could not allocate memory resources for MAC.\n");
322 err = ENOMEM;
323 goto err_res_mac;
324 }
325 adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
326 rman_get_bushandle(adapter->mac_res));
327
328 bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
329 adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
330 RF_ACTIVE);
331 if (adapter->ec_res == NULL) {
332 device_printf(adapter->dev,
333 "could not allocate memory resources for EC.\n");
334 err = ENOMEM;
335 goto err_res_ec;
336 }
337 adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
338 rman_get_bushandle(adapter->ec_res));
339
340 adapter->netdev = ifp = if_alloc(IFT_ETHER);
341
342 adapter->netdev->if_link_state = LINK_STATE_DOWN;
343
344 ifp->if_softc = adapter;
345 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
346 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
347 ifp->if_flags = ifp->if_drv_flags;
348 ifp->if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI;
349 ifp->if_transmit = al_mq_start;
350 ifp->if_qflush = al_qflush;
351 ifp->if_ioctl = al_ioctl;
352 ifp->if_init = al_init;
353 ifp->if_get_counter = al_get_counter;
354 ifp->if_mtu = AL_DEFAULT_MTU;
355
356 adapter->if_flags = ifp->if_flags;
357
358 ifp->if_capabilities = ifp->if_capenable = 0;
359
360 ifp->if_capabilities |= IFCAP_HWCSUM |
361 IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
362 IFCAP_LRO | IFCAP_JUMBO_MTU;
363
364 ifp->if_capenable = ifp->if_capabilities;
365
366 adapter->id_number = g_adapters_count;
367
368 if (adapter->board_type == ALPINE_INTEGRATED) {
369 dev_id = pci_get_device(adapter->dev);
370 rev_id = pci_get_revid(adapter->dev);
371 } else {
372 al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
373 PCIR_DEVICE, &dev_id);
374 al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
375 PCIR_REVID, &rev_id);
376 }
377
378 adapter->dev_id = dev_id;
379 adapter->rev_id = rev_id;
380
381 /* set default ring sizes */
382 adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
383 adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
384 adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
385 adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;
386
387 adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
388 adapter->num_rx_queues = AL_ETH_NUM_QUEUES;
389
390 adapter->small_copy_len = AL_ETH_DEFAULT_SMALL_PACKET_LEN;
391 adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
392 adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;
393
394 al_eth_req_rx_buff_size(adapter, adapter->netdev->if_mtu);
395
396 adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;
397
398 err = al_eth_board_params_init(adapter);
399 if (err != 0)
400 goto err;
401
402 if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
403 ifmedia_init(&adapter->media, IFM_IMASK,
404 al_media_update, al_media_status);
405 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
406 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
407 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
408 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
409 }
410
411 al_eth_function_reset(adapter);
412
413 err = al_eth_hw_init_adapter(adapter);
414 if (err != 0)
415 goto err;
416
417 al_eth_init_rings(adapter);
418 g_adapters_count++;
419
420 al_eth_lm_config(adapter);
421 mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
422 mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
423 callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
424 callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);
425
426 ether_ifattach(ifp, adapter->mac_addr);
427 ifp->if_mtu = AL_DEFAULT_MTU;
428
429 if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
430 al_eth_hw_init(adapter);
431
432 /* Attach PHY(s) */
433 err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
434 al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
435 MII_OFFSET_ANY, 0);
436 if (err != 0) {
437 device_printf(adapter->dev, "attaching PHYs failed\n");
438 return (err);
439 }
440
441 adapter->mii = device_get_softc(adapter->miibus);
442 }
443
444 return (err);
445
446 err:
447 bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
448 err_res_ec:
449 bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
450 err_res_mac:
451 bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
452 err_res_dma:
453 return (err);
454 }
455
456 static int
457 al_detach(device_t dev)
458 {
459 struct al_eth_adapter *adapter;
460
461 adapter = device_get_softc(dev);
462 ether_ifdetach(adapter->netdev);
463
464 mtx_destroy(&adapter->stats_mtx);
465 mtx_destroy(&adapter->wd_mtx);
466
467 al_eth_down(adapter);
468
469 bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->irq_res);
470 bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
471 bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res);
472 bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);
473
474 return (0);
475 }
476
477 int
478 al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
479 {
480
481 /* handle is the base address of the adapter */
482 *val = al_reg_read32((void*)((u_long)handle + where));
483
484 return (0);
485 }
486
487 int
488 al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
489 {
490
491 /* handle is the base address of the adapter */
492 al_reg_write32((void*)((u_long)handle + where), val);
493 return (0);
494 }
495
496 int
497 al_eth_read_pci_config(void *handle, int where, uint32_t *val)
498 {
499
500 /* handle is a pci_dev */
501 *val = pci_read_config((device_t)handle, where, sizeof(*val));
502 return (0);
503 }
504
505 int
506 al_eth_write_pci_config(void *handle, int where, uint32_t val)
507 {
508
509 /* handle is a pci_dev */
510 pci_write_config((device_t)handle, where, val, sizeof(val));
511 return (0);
512 }
513
514 void
515 al_eth_irq_config(uint32_t *offset, uint32_t value)
516 {
517
518 al_reg_write32_relaxed(offset, value);
519 }
520
521 void
522 al_eth_forward_int_config(uint32_t *offset, uint32_t value)
523 {
524
525 al_reg_write32(offset, value);
526 }
527
528 static void
529 al_eth_serdes_init(struct al_eth_adapter *adapter)
530 {
531 void __iomem *serdes_base;
532
533 adapter->serdes_init = false;
534
535 serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
536 if (serdes_base == NULL) {
537 device_printf(adapter->dev, "serdes_base get failed!\n");
538 return;
539 }
540
541 serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);
542
543 al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
544 &adapter->serdes_obj);
545
546 adapter->serdes_init = true;
547 }
548
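/*
 * al_dma_map_addr - bus_dmamap_load() callback
 * @arg: pointer to a bus_addr_t that receives the address of the single
 *	segment
 */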
static void
al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	if (error != 0)
		return;

	paddr = arg;
	*paddr = segs->ds_addr;
}
557
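/*
 * al_dma_alloc_coherent - create a DMA tag, then allocate, zero and load a
 * coherent buffer of @size bytes (rounded up to a full page); returns the
 * bus address through @baddr and the kernel virtual address through @vaddr.
 */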
static int
al_dma_alloc_coherent(device_t dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
    bus_addr_t *baddr, void **vaddr, uint32_t size)
{
	int ret;
	uint32_t maxsize = roundup(size, PAGE_SIZE);

	ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag);
	if (ret != 0) {
		device_printf(dev,
		    "failed to create bus tag, ret = %d\n", ret);
		return (ret);
	}

	ret = bus_dmamem_alloc(*tag, vaddr,
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
	if (ret != 0) {
		device_printf(dev,
		    "failed to allocate dmamem, ret = %d\n", ret);
		bus_dma_tag_destroy(*tag);
		return (ret);
	}

	ret = bus_dmamap_load(*tag, *map, *vaddr,
	    size, al_dma_map_addr, baddr, 0);
	if (ret != 0) {
		device_printf(dev,
		    "failed to load the DMA map, ret = %d\n", ret);
		bus_dmamem_free(*tag, *vaddr, *map);
		bus_dma_tag_destroy(*tag);
		return (ret);
	}

	return (0);
}
592
593 static void
594 al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
595 {
596
597 bus_dmamap_unload(tag, map);
598 bus_dmamem_free(tag, vaddr, map);
599 bus_dma_tag_destroy(tag);
600 }
601
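/*
 * al_eth_mac_table_unicast_add - program the interface unicast address into
 * entry @idx of the HW MAC forwarding table; matching RX frames are steered
 * to the UDMAs selected by @udma_mask.
 */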
602 static void
603 al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter,
604 uint8_t idx, uint8_t udma_mask)
605 {
606 struct al_eth_fwd_mac_table_entry entry = { { 0 } };
607
608 memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr));
609
610 memset(entry.mask, 0xff, sizeof(entry.mask));
611 entry.rx_valid = true;
612 entry.tx_valid = false;
613 entry.udma_mask = udma_mask;
614 entry.filter = false;
615
616 device_printf_dbg(adapter->dev,
617 "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
618 __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
619
620 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
621 }
622
623 static void
624 al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx,
625 uint8_t udma_mask)
626 {
627 struct al_eth_fwd_mac_table_entry entry = { { 0 } };
628
629 memset(entry.addr, 0x00, sizeof(entry.addr));
630 memset(entry.mask, 0x00, sizeof(entry.mask));
631 entry.mask[0] |= 1;
632 entry.addr[0] |= 1;
633
634 entry.rx_valid = true;
635 entry.tx_valid = false;
636 entry.udma_mask = udma_mask;
637 entry.filter = false;
638
639 device_printf_dbg(adapter->dev,
640 "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
641 __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
642
643 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
644 }
645
646 static void
647 al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter,
648 uint8_t idx, uint8_t udma_mask)
649 {
650 struct al_eth_fwd_mac_table_entry entry = { { 0 } };
651
652 memset(entry.addr, 0xff, sizeof(entry.addr));
653 memset(entry.mask, 0xff, sizeof(entry.mask));
654
655 entry.rx_valid = true;
656 entry.tx_valid = false;
657 entry.udma_mask = udma_mask;
658 entry.filter = false;
659
660 device_printf_dbg(adapter->dev,
661 "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
662 __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));
663
664 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
665 }
666
667 static void
668 al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter,
669 boolean_t promiscuous)
670 {
671 struct al_eth_fwd_mac_table_entry entry = { { 0 } };
672
673 memset(entry.addr, 0x00, sizeof(entry.addr));
674 memset(entry.mask, 0x00, sizeof(entry.mask));
675
676 entry.rx_valid = true;
677 entry.tx_valid = false;
678 entry.udma_mask = (promiscuous) ? 1 : 0;
679 entry.filter = (promiscuous) ? false : true;
680
681 device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n",
682 __func__, (promiscuous) ? "enter" : "exit");
683
684 al_eth_fwd_mac_table_set(&adapter->hal_adapter,
685 AL_ETH_MAC_TABLE_DROP_IDX, &entry);
686 }
687
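/*
 * al_eth_set_thash_table_entry - program one RSS (thash) table entry;
 * only UDMA 0 and queue indices below AL_ETH_NUM_QUEUES are valid.
 */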
688 static void
689 al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx,
690 uint8_t udma, uint32_t queue)
691 {
692
693 if (udma != 0)
		panic("only UDMA0 is supported");
695
696 if (queue >= AL_ETH_NUM_QUEUES)
697 panic("invalid queue number");
698
699 al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue);
700 }
701
/*
 * Initialize the FSM table. No tunneling is supported yet; if the packet is
 * TCP/UDP over IPv4/IPv6, use a 4-tuple hash.
 */
703 static void
704 al_eth_fsm_table_init(struct al_eth_adapter *adapter)
705 {
706 uint32_t val;
707 int i;
708
709 for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) {
710 uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i);
711 switch (outer_type) {
712 case AL_ETH_FSM_ENTRY_IPV4_TCP:
713 case AL_ETH_FSM_ENTRY_IPV4_UDP:
714 case AL_ETH_FSM_ENTRY_IPV6_TCP:
715 case AL_ETH_FSM_ENTRY_IPV6_UDP:
716 val = AL_ETH_FSM_DATA_OUTER_4_TUPLE |
717 AL_ETH_FSM_DATA_HASH_SEL;
718 break;
719 case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP:
720 case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP:
721 val = AL_ETH_FSM_DATA_OUTER_2_TUPLE |
722 AL_ETH_FSM_DATA_HASH_SEL;
723 break;
724 default:
725 val = AL_ETH_FSM_DATA_DEFAULT_Q |
726 AL_ETH_FSM_DATA_DEFAULT_UDMA;
727 }
728 al_eth_fsm_table_set(&adapter->hal_adapter, i, val);
729 }
730 }
731
732 static void
733 al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter,
734 uint8_t idx)
735 {
736 struct al_eth_fwd_mac_table_entry entry = { { 0 } };
737
738 device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx);
739
740 al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
741 }
742
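/*
 * al_eth_hw_init_adapter - initialize the HAL adapter from the per-device
 * parameters; in PCIe NIC modes it also forces a UDMA target-id of 0x100 on
 * all queues so that accesses go through PCIE0.
 */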
743 static int
744 al_eth_hw_init_adapter(struct al_eth_adapter *adapter)
745 {
746 struct al_eth_adapter_params *params = &adapter->eth_hal_params;
747 int rc;
748
749 /* params->dev_id = adapter->dev_id; */
750 params->rev_id = adapter->rev_id;
751 params->udma_id = 0;
752 params->enable_rx_parser = 1; /* enable rx epe parser*/
753 params->udma_regs_base = adapter->udma_base; /* UDMA register base address */
754 params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */
755 params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */
756 params->name = adapter->name;
757 params->serdes_lane = adapter->serdes_lane;
758
759 rc = al_eth_adapter_init(&adapter->hal_adapter, params);
760 if (rc != 0)
761 device_printf(adapter->dev, "%s failed at hal init!\n",
762 __func__);
763
764 if ((adapter->board_type == ALPINE_NIC) ||
765 (adapter->board_type == ALPINE_FPGA_NIC)) {
		/* in PCIe NIC mode, force the eth UDMA to access PCIE0 using the target-id */
767 struct al_udma_gen_tgtid_conf conf;
768 int i;
769 for (i = 0; i < DMA_MAX_Q; i++) {
770 conf.tx_q_conf[i].queue_en = AL_TRUE;
771 conf.tx_q_conf[i].desc_en = AL_FALSE;
772 conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
773 conf.rx_q_conf[i].queue_en = AL_TRUE;
774 conf.rx_q_conf[i].desc_en = AL_FALSE;
775 conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
776 }
777 al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf);
778 }
779
780 return (rc);
781 }
782
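/*
 * al_eth_lm_config - initialize the link management context: SFP detection,
 * the default link mode, link training and the optional retimer settings.
 */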
783 static void
784 al_eth_lm_config(struct al_eth_adapter *adapter)
785 {
786 struct al_eth_lm_init_params params = {0};
787
788 params.adapter = &adapter->hal_adapter;
789 params.serdes_obj = &adapter->serdes_obj;
790 params.lane = adapter->serdes_lane;
791 params.sfp_detection = adapter->sfp_detection_needed;
792 if (adapter->sfp_detection_needed == true) {
793 params.sfp_bus_id = adapter->i2c_adapter_id;
794 params.sfp_i2c_addr = SFP_I2C_ADDR;
795 }
796
797 if (adapter->sfp_detection_needed == false) {
798 switch (adapter->mac_mode) {
799 case AL_ETH_MAC_MODE_10GbE_Serial:
800 if ((adapter->lt_en != 0) && (adapter->an_en != 0))
801 params.default_mode = AL_ETH_LM_MODE_10G_DA;
802 else
803 params.default_mode = AL_ETH_LM_MODE_10G_OPTIC;
804 break;
805 case AL_ETH_MAC_MODE_SGMII:
806 params.default_mode = AL_ETH_LM_MODE_1G;
807 break;
808 default:
809 params.default_mode = AL_ETH_LM_MODE_10G_DA;
810 }
811 } else
812 params.default_mode = AL_ETH_LM_MODE_10G_DA;
813
814 params.link_training = adapter->lt_en;
815 params.rx_equal = true;
816 params.static_values = !adapter->dont_override_serdes;
817 params.i2c_context = adapter;
818 params.kr_fec_enable = false;
819
820 params.retimer_exist = adapter->retimer.exist;
821 params.retimer_bus_id = adapter->retimer.bus_id;
822 params.retimer_i2c_addr = adapter->retimer.i2c_addr;
823 params.retimer_channel = adapter->retimer.channel;
824
	al_eth_lm_init(&adapter->lm_context, &params);
826 }
827
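/*
 * al_eth_board_params_init - derive MAC mode, link, SerDes and MDIO settings
 * either from fixed NIC-mode defaults or from the board parameters stored in
 * HW, and read the permanent MAC address.
 */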
828 static int
829 al_eth_board_params_init(struct al_eth_adapter *adapter)
830 {
831
832 if (adapter->board_type == ALPINE_NIC) {
833 adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
834 adapter->sfp_detection_needed = false;
835 adapter->phy_exist = false;
836 adapter->an_en = false;
837 adapter->lt_en = false;
838 adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
839 adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
840 } else if (adapter->board_type == ALPINE_FPGA_NIC) {
841 adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
842 adapter->sfp_detection_needed = false;
843 adapter->phy_exist = false;
844 adapter->an_en = false;
845 adapter->lt_en = false;
846 adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
847 adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
848 } else {
849 struct al_eth_board_params params;
850 int rc;
851
852 adapter->auto_speed = false;
853
		rc = al_eth_board_params_get(adapter->mac_base, &params);
855 if (rc != 0) {
856 device_printf(adapter->dev,
857 "board info not available\n");
858 return (-1);
859 }
860
861 adapter->phy_exist = params.phy_exist == TRUE;
862 adapter->phy_addr = params.phy_mdio_addr;
863 adapter->an_en = params.autoneg_enable;
864 adapter->lt_en = params.kr_lt_enable;
865 adapter->serdes_grp = params.serdes_grp;
866 adapter->serdes_lane = params.serdes_lane;
867 adapter->sfp_detection_needed = params.sfp_plus_module_exist;
868 adapter->i2c_adapter_id = params.i2c_adapter_id;
869 adapter->ref_clk_freq = params.ref_clk_freq;
870 adapter->dont_override_serdes = params.dont_override_serdes;
871 adapter->link_config.active_duplex = !params.half_duplex;
872 adapter->link_config.autoneg = !params.an_disable;
873 adapter->link_config.force_1000_base_x = params.force_1000_base_x;
874 adapter->retimer.exist = params.retimer_exist;
875 adapter->retimer.bus_id = params.retimer_bus_id;
876 adapter->retimer.i2c_addr = params.retimer_i2c_addr;
877 adapter->retimer.channel = params.retimer_channel;
878
879 switch (params.speed) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid speed (%d)\n", __func__, params.speed);
			/* FALLTHROUGH */
883 case AL_ETH_BOARD_1G_SPEED_1000M:
884 adapter->link_config.active_speed = 1000;
885 break;
886 case AL_ETH_BOARD_1G_SPEED_100M:
887 adapter->link_config.active_speed = 100;
888 break;
889 case AL_ETH_BOARD_1G_SPEED_10M:
890 adapter->link_config.active_speed = 10;
891 break;
892 }
893
894 switch (params.mdio_freq) {
		default:
			device_printf(adapter->dev,
			    "%s: invalid mdio freq (%d)\n", __func__,
			    params.mdio_freq);
			/* FALLTHROUGH */
899 case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ:
900 adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
901 break;
902 case AL_ETH_BOARD_MDIO_FREQ_1_MHZ:
903 adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ;
904 break;
905 }
906
907 switch (params.media_type) {
908 case AL_ETH_BOARD_MEDIA_TYPE_RGMII:
909 if (params.sfp_plus_module_exist == TRUE)
910 /* Backward compatibility */
911 adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
912 else
913 adapter->mac_mode = AL_ETH_MAC_MODE_RGMII;
914
915 adapter->use_lm = false;
916 break;
917 case AL_ETH_BOARD_MEDIA_TYPE_SGMII:
918 adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
919 adapter->use_lm = true;
920 break;
921 case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR:
922 adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
923 adapter->use_lm = true;
924 break;
925 case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT:
926 adapter->sfp_detection_needed = TRUE;
927 adapter->auto_speed = false;
928 adapter->use_lm = true;
929 break;
930 case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED:
931 adapter->sfp_detection_needed = TRUE;
932 adapter->auto_speed = true;
933 adapter->mac_mode_set = false;
934 adapter->use_lm = true;
935
936 adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
937 break;
938 default:
939 device_printf(adapter->dev,
940 "%s: unsupported media type %d\n",
941 __func__, params.media_type);
942 return (-1);
943 }
944
945 device_printf(adapter->dev,
		    "Board info: phy exist %s. phy addr %d. mdio freq %u kHz. "
947 "SFP connected %s. media %d\n",
948 params.phy_exist == TRUE ? "Yes" : "No",
949 params.phy_mdio_addr, adapter->mdio_freq,
950 params.sfp_plus_module_exist == TRUE ? "Yes" : "No",
951 params.media_type);
952 }
953
954 al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
955
956 return (0);
957 }
958
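/*
 * al_eth_function_reset - issue a function-level reset, saving the board
 * parameters and the primary MAC address beforehand and restoring them
 * afterwards.
 */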
959 static int
960 al_eth_function_reset(struct al_eth_adapter *adapter)
961 {
962 struct al_eth_board_params params;
963 int rc;
964
965 /* save board params so we restore it after reset */
	al_eth_board_params_get(adapter->mac_base, &params);
967 al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
968 if (adapter->board_type == ALPINE_INTEGRATED)
969 rc = al_eth_flr_rmn(&al_eth_read_pci_config,
970 &al_eth_write_pci_config,
971 adapter->dev, adapter->mac_base);
972 else
973 rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config,
974 &al_eth_fpga_write_pci_config,
975 adapter->internal_pcie_base, adapter->mac_base);
976
977 /* restore params */
	al_eth_board_params_set(adapter->mac_base, &params);
979 al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr);
980
981 return (rc);
982 }
983
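/*
 * al_eth_init_rings - attach every TX/RX ring to its UDMA queue handle and
 * record the per-ring interrupt unmask register offset and value.
 */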
984 static void
985 al_eth_init_rings(struct al_eth_adapter *adapter)
986 {
987 int i;
988
989 for (i = 0; i < adapter->num_tx_queues; i++) {
990 struct al_eth_ring *ring = &adapter->tx_ring[i];
991
992 ring->ring_id = i;
993 ring->dev = adapter->dev;
994 ring->adapter = adapter;
995 ring->netdev = adapter->netdev;
996 al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
997 &ring->dma_q);
998 ring->sw_count = adapter->tx_ring_count;
999 ring->hw_count = adapter->tx_descs_count;
		ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
		    (struct unit_regs *)adapter->udma_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
1001 ring->unmask_val = ~(1 << i);
1002 }
1003
1004 for (i = 0; i < adapter->num_rx_queues; i++) {
1005 struct al_eth_ring *ring = &adapter->rx_ring[i];
1006
1007 ring->ring_id = i;
1008 ring->dev = adapter->dev;
1009 ring->adapter = adapter;
1010 ring->netdev = adapter->netdev;
1011 al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q);
1012 ring->sw_count = adapter->rx_ring_count;
1013 ring->hw_count = adapter->rx_descs_count;
1014 ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
1015 (struct unit_regs *)adapter->udma_base,
1016 AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
1017 ring->unmask_val = ~(1 << i);
1018 }
1019 }
1020
1021 static void
1022 al_init_locked(void *arg)
1023 {
1024 struct al_eth_adapter *adapter = arg;
1025 if_t ifp = adapter->netdev;
1026 int rc = 0;
1027
1028 al_eth_down(adapter);
1029 rc = al_eth_up(adapter);
1030
1031 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1032 if (rc == 0)
1033 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1034 }
1035
1036 static void
1037 al_init(void *arg)
1038 {
1039 struct al_eth_adapter *adapter = arg;
1040
1041 al_init_locked(adapter);
1042 }
1043
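/*
 * al_eth_alloc_rx_buf - attach a fresh mbuf cluster to @rx_info and map it
 * for DMA; the HAL buffer address is offset by AL_IP_ALIGNMENT_OFFSET to
 * keep the IP header aligned.
 */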
1044 static inline int
1045 al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
1046 struct al_eth_ring *rx_ring,
1047 struct al_eth_rx_buffer *rx_info)
1048 {
1049 struct al_buf *al_buf;
1050 bus_dma_segment_t segs[2];
1051 int error;
1052 int nsegs;
1053
1054 if (rx_info->m != NULL)
1055 return (0);
1056
1057 rx_info->data_size = adapter->rx_mbuf_sz;
1058
1059 AL_RX_LOCK(adapter);
1060
1061 /* Get mbuf using UMA allocator */
1062 rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1063 rx_info->data_size);
1064 AL_RX_UNLOCK(adapter);
1065
1066 if (rx_info->m == NULL)
1067 return (ENOMEM);
1068
1069 rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;
1070
1071 /* Map packets for DMA */
1072 error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
1073 rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
1074 if (__predict_false(error)) {
1075 device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
1076 error);
1077 m_freem(rx_info->m);
1078 rx_info->m = NULL;
1079 return (EFAULT);
1080 }
1081
1082 al_buf = &rx_info->al_buf;
1083 al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
1084 al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;
1085
1086 return (0);
1087 }
1088
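/*
 * al_eth_refill_rx_bufs - allocate and post up to @num RX buffers on queue
 * @qid; returns the number of buffers actually handed to the UDMA.
 */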
1089 static int
1090 al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
1091 unsigned int num)
1092 {
1093 struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
1094 uint16_t next_to_use;
1095 unsigned int i;
1096
1097 next_to_use = rx_ring->next_to_use;
1098
1099 for (i = 0; i < num; i++) {
1100 int rc;
1101 struct al_eth_rx_buffer *rx_info =
1102 &rx_ring->rx_buffer_info[next_to_use];
1103
		if (__predict_false(al_eth_alloc_rx_buf(adapter,
		    rx_ring, rx_info) != 0)) {
1106 device_printf(adapter->dev,
1107 "failed to alloc buffer for rx queue %d\n", qid);
1108 break;
1109 }
1110
1111 rc = al_eth_rx_buffer_add(rx_ring->dma_q,
1112 &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
1113 if (__predict_false(rc)) {
1114 device_printf(adapter->dev,
1115 "failed to add buffer for rx queue %d\n", qid);
1116 break;
1117 }
1118
1119 next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
1120 }
1121
	if (__predict_false(i < num))
		device_printf(adapter->dev,
		    "refilled rx queue %d with only %d buffers - available %d\n",
		    qid, i, al_udma_available_get(rx_ring->dma_q));
1126
1127 if (__predict_true(i))
1128 al_eth_rx_buffer_action(rx_ring->dma_q, i);
1129
1130 rx_ring->next_to_use = next_to_use;
1131
1132 return (i);
1133 }
1134
1135 /*
1136 * al_eth_refill_all_rx_bufs - allocate all queues Rx buffers
1137 * @adapter: board private structure
1138 */
1139 static void
1140 al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
1141 {
1142 int i;
1143
1144 for (i = 0; i < adapter->num_rx_queues; i++)
1145 al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
1146 }
1147
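/*
 * al_eth_tx_do_cleanup - reclaim completed TX descriptors: unmap and free
 * every mbuf whose descriptors the UDMA reports as completed.
 */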
1148 static void
1149 al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
1150 {
1151 unsigned int total_done;
1152 uint16_t next_to_clean;
1153 int qid = tx_ring->ring_id;
1154
1155 total_done = al_eth_comp_tx_get(tx_ring->dma_q);
1156 device_printf_dbg(tx_ring->dev,
1157 "tx_poll: q %d total completed descs %x\n", qid, total_done);
1158 next_to_clean = tx_ring->next_to_clean;
1159
1160 while (total_done != 0) {
1161 struct al_eth_tx_buffer *tx_info;
1162 struct mbuf *mbuf;
1163
1164 tx_info = &tx_ring->tx_buffer_info[next_to_clean];
1165 /* stop if not all descriptors of the packet are completed */
1166 if (tx_info->tx_descs > total_done)
1167 break;
1168
1169 mbuf = tx_info->m;
1170
1171 tx_info->m = NULL;
1172
1173 device_printf_dbg(tx_ring->dev,
1174 "tx_poll: q %d mbuf %p completed\n", qid, mbuf);
1175
1176 /* map is no longer required */
1177 bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);
1178
1179 m_freem(mbuf);
1180 total_done -= tx_info->tx_descs;
1181 next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
1182 }
1183
1184 tx_ring->next_to_clean = next_to_clean;
1185
1186 device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
1187 qid, next_to_clean);
1188
1189 /*
1190 * need to make the rings circular update visible to
1191 * al_eth_start_xmit() before checking for netif_queue_stopped().
1192 */
1193 al_smp_data_memory_barrier();
1194 }
1195
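/*
 * al_eth_tx_csum - set the HAL packet flags and meta data for checksum
 * offload and TSO based on the mbuf csum flags and the L3/L4 headers.
 */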
1196 static void
1197 al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
1198 struct al_eth_pkt *hal_pkt, struct mbuf *m)
1199 {
1200 uint32_t mss = m->m_pkthdr.tso_segsz;
1201 struct ether_vlan_header *eh;
1202 uint16_t etype;
1203 #ifdef INET
1204 struct ip *ip;
1205 #endif
1206 #ifdef INET6
1207 struct ip6_hdr *ip6;
1208 #endif
1209 struct tcphdr *th = NULL;
1210 int ehdrlen, ip_hlen = 0;
1211 uint8_t ipproto = 0;
1212 uint32_t offload = 0;
1213
1214 if (mss != 0)
1215 offload = 1;
1216
1217 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0)
1218 offload = 1;
1219
1220 if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
1221 offload = 1;
1222
1223 if (offload != 0) {
1224 struct al_eth_meta_data *meta = &tx_ring->hal_meta;
1225
1226 if (mss != 0)
1227 hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO |
1228 AL_ETH_TX_FLAGS_L4_CSUM);
1229 else
1230 hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM |
1231 AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM);
1232
1233 /*
1234 * Determine where frame payload starts.
1235 * Jump over vlan headers if already present,
1236 * helpful for QinQ too.
1237 */
1238 eh = mtod(m, struct ether_vlan_header *);
1239 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1240 etype = ntohs(eh->evl_proto);
1241 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1242 } else {
1243 etype = ntohs(eh->evl_encap_proto);
1244 ehdrlen = ETHER_HDR_LEN;
1245 }
1246
1247 switch (etype) {
1248 #ifdef INET
1249 case ETHERTYPE_IP:
1250 ip = (struct ip *)(m->m_data + ehdrlen);
1251 ip_hlen = ip->ip_hl << 2;
1252 ipproto = ip->ip_p;
1253 hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4;
1254 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1255 if (mss != 0)
1256 hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM;
1257 if (ipproto == IPPROTO_TCP)
1258 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
1259 else
1260 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
1261 break;
1262 #endif /* INET */
1263 #ifdef INET6
1264 case ETHERTYPE_IPV6:
1265 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1266 hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6;
1267 ip_hlen = sizeof(struct ip6_hdr);
1268 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1269 ipproto = ip6->ip6_nxt;
1270 if (ipproto == IPPROTO_TCP)
1271 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
1272 else
1273 hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
1274 break;
1275 #endif /* INET6 */
1276 default:
1277 break;
1278 }
1279
1280 meta->words_valid = 4;
1281 meta->l3_header_len = ip_hlen;
1282 meta->l3_header_offset = ehdrlen;
1283 if (th != NULL)
1284 meta->l4_header_len = th->th_off; /* this param needed only for TSO */
1285 meta->mss_idx_sel = 0; /* check how to select MSS */
1286 meta->mss_val = mss;
1287 hal_pkt->meta = meta;
1288 } else
1289 hal_pkt->meta = NULL;
1290 }
1291
1292 #define XMIT_QUEUE_TIMEOUT 100
1293
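/*
 * al_eth_xmit_mbuf - map an mbuf chain and hand it to the UDMA; on EFBIG the
 * chain is defragmented once, and the ring is marked stalled when it runs
 * low on descriptors.
 */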
1294 static void
1295 al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m)
1296 {
1297 struct al_eth_tx_buffer *tx_info;
1298 int error;
1299 int nsegs, a;
1300 uint16_t next_to_use;
1301 bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1];
1302 struct al_eth_pkt *hal_pkt;
1303 struct al_buf *al_buf;
1304 boolean_t remap;
1305
	/* Check if queue is ready */
	if (unlikely(tx_ring->stall != 0)) {
1308 for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) {
1309 if (al_udma_available_get(tx_ring->dma_q) >=
1310 (AL_ETH_DEFAULT_TX_HW_DESCS -
1311 AL_ETH_TX_WAKEUP_THRESH)) {
1312 tx_ring->stall = 0;
1313 break;
1314 }
1315 pause("stall", 1);
1316 }
1317 if (a == XMIT_QUEUE_TIMEOUT) {
1318 device_printf(tx_ring->dev,
1319 "timeout waiting for queue %d ready!\n",
1320 tx_ring->ring_id);
1321 return;
1322 } else {
1323 device_printf_dbg(tx_ring->dev,
1324 "queue %d is ready!\n", tx_ring->ring_id);
1325 }
1326 }
1327
	if (m == NULL) {
		device_printf(tx_ring->dev, "mbuf is NULL\n");
		return;
	}

	next_to_use = tx_ring->next_to_use;
	tx_info = &tx_ring->tx_buffer_info[next_to_use];
	tx_info->m = m;
	hal_pkt = &tx_info->hal_pkt;
1337
1338 remap = TRUE;
1339 /* Map packets for DMA */
1340 retry:
1341 error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map,
1342 m, segs, &nsegs, BUS_DMA_NOWAIT);
1343 if (__predict_false(error)) {
1344 struct mbuf *m_new;
1345
1346 if (error == EFBIG) {
1347 /* Try it again? - one try */
1348 if (remap == TRUE) {
1349 remap = FALSE;
1350 m_new = m_defrag(m, M_NOWAIT);
1351 if (m_new == NULL) {
1352 device_printf(tx_ring->dev,
1353 "failed to defrag mbuf\n");
1354 goto exit;
1355 }
1356 m = m_new;
1357 goto retry;
1358 } else {
1359 device_printf(tx_ring->dev,
1360 "failed to map mbuf, error %d\n", error);
1361 goto exit;
1362 }
1363 } else {
1364 device_printf(tx_ring->dev,
1365 "failed to map mbuf, error %d\n", error);
1366 goto exit;
1367 }
1368 }
1369
1370 /* set flags and meta data */
1371 hal_pkt->flags = AL_ETH_TX_FLAGS_INT;
1372 al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m);
1373
1374 al_buf = hal_pkt->bufs;
1375 for (a = 0; a < nsegs; a++) {
1376 al_buf->addr = segs[a].ds_addr;
1377 al_buf->len = segs[a].ds_len;
1378
1379 al_buf++;
1380 }
1381
1382 hal_pkt->num_of_bufs = nsegs;
1383
1384 /* prepare the packet's descriptors to dma engine */
1385 tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);
1386
	if (tx_info->tx_descs == 0) {
		bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);
		goto exit;
	}
1389
1390 /*
1391 * stop the queue when no more space available, the packet can have up
1392 * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor
1393 */
1394 if (unlikely(al_udma_available_get(tx_ring->dma_q) <
1395 (AL_ETH_PKT_MAX_BUFS + 2))) {
1396 tx_ring->stall = 1;
1397 device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n",
1398 tx_ring->ring_id);
1399 al_data_memory_barrier();
1400 }
1401
1402 tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);
1403
1404 /* trigger the dma engine */
1405 al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
1406 return;
1407
exit:
	tx_info->m = NULL;
	m_freem(m);
1410 }
1411
1412 static void
1413 al_eth_tx_cmpl_work(void *arg, int pending)
1414 {
1415 struct al_eth_ring *tx_ring = arg;
1416
1417 if (napi != 0) {
1418 tx_ring->cmpl_is_running = 1;
1419 al_data_memory_barrier();
1420 }
1421
1422 al_eth_tx_do_cleanup(tx_ring);
1423
1424 if (napi != 0) {
1425 tx_ring->cmpl_is_running = 0;
1426 al_data_memory_barrier();
1427 }
1428 /* all work done, enable IRQs */
1429 al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
1430 }
1431
1432 static int
1433 al_eth_tx_cmlp_irq_filter(void *arg)
1434 {
1435 struct al_eth_ring *tx_ring = arg;
1436
1437 /* Interrupt should be auto-masked upon arrival */
1438
1439 device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
1440 tx_ring->ring_id);
1441
1442 /*
1443 * For napi, if work is not running, schedule it. Always schedule
1444 * for casual (non-napi) packet handling.
1445 */
1446 if ((napi == 0) || (napi && tx_ring->cmpl_is_running == 0))
1447 taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
1448
1449 /* Do not run bottom half */
1450 return (FILTER_HANDLED);
1451 }
1452
1453 static int
1454 al_eth_rx_recv_irq_filter(void *arg)
1455 {
1456 struct al_eth_ring *rx_ring = arg;
1457
1458 /* Interrupt should be auto-masked upon arrival */
1459
1460 device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__,
1461 rx_ring->ring_id);
1462
1463 /*
1464 * For napi, if work is not running, schedule it. Always schedule
1465 * for casual (non-napi) packet handling.
1466 */
1467 if ((napi == 0) || (napi && rx_ring->enqueue_is_running == 0))
1468 taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
1469
1470 /* Do not run bottom half */
1471 return (FILTER_HANDLED);
1472 }
1473
1474 /*
1475 * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum
1476 * @adapter: structure containing adapter specific data
1477 * @hal_pkt: HAL structure for the packet
1478 * @mbuf: mbuf currently being received and modified
1479 */
1480 static inline void
1481 al_eth_rx_checksum(struct al_eth_adapter *adapter,
1482 struct al_eth_pkt *hal_pkt, struct mbuf *mbuf)
1483 {
1484
1485 /* if IPv4 and error */
1486 if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM) &&
1487 (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) &&
1488 (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
		device_printf(adapter->dev, "rx ipv4 header checksum error\n");
1490 return;
1491 }
1492
1493 /* if IPv6 and error */
1494 if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM_IPV6) &&
1495 (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) &&
1496 (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
		device_printf(adapter->dev, "rx ipv6 header checksum error\n");
1498 return;
1499 }
1500
1501 /* if TCP/UDP */
1502 if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) ||
1503 (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) {
1504 if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) {
1505 device_printf_dbg(adapter->dev, "rx L4 checksum error\n");
1506
1507 /* TCP/UDP checksum error */
1508 mbuf->m_pkthdr.csum_flags = 0;
1509 } else {
1510 device_printf_dbg(adapter->dev, "rx checksum correct\n");
1511
1512 /* IP Checksum Good */
1513 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1514 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1515 }
1516 }
1517 }
1518
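/*
 * al_eth_rx_mbuf - turn a received HAL packet into an mbuf; frames up to
 * small_copy_len are copied into a fresh header mbuf so that the original
 * cluster can stay mapped and be reused by the ring.
 */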
static struct mbuf *
1520 al_eth_rx_mbuf(struct al_eth_adapter *adapter,
1521 struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt,
1522 unsigned int descs, uint16_t *next_to_clean)
1523 {
1524 struct mbuf *mbuf;
1525 struct al_eth_rx_buffer *rx_info =
1526 &rx_ring->rx_buffer_info[*next_to_clean];
1527 unsigned int len;
1528
1529 len = hal_pkt->bufs[0].len;
1530 device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info,
1531 rx_info->m);
1532
1533 if (rx_info->m == NULL) {
1534 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1535 *next_to_clean);
1536 return (NULL);
1537 }
1538
1539 mbuf = rx_info->m;
1540 mbuf->m_pkthdr.len = len;
1541 mbuf->m_len = len;
1542 mbuf->m_pkthdr.rcvif = rx_ring->netdev;
1543 mbuf->m_flags |= M_PKTHDR;
1544
1545 if (len <= adapter->small_copy_len) {
1546 struct mbuf *smbuf;
1547 device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len);
1548
1549 AL_RX_LOCK(adapter);
1550 smbuf = m_gethdr(M_NOWAIT, MT_DATA);
1551 AL_RX_UNLOCK(adapter);
1552 if (__predict_false(smbuf == NULL)) {
1553 device_printf(adapter->dev, "smbuf is NULL\n");
1554 return (NULL);
1555 }
1556
1557 smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1558 memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len);
1559
1560 smbuf->m_len = len;
1561 smbuf->m_pkthdr.rcvif = rx_ring->netdev;
1562
1563 /* first desc of a non-ps chain */
1564 smbuf->m_flags |= M_PKTHDR;
1565 smbuf->m_pkthdr.len = smbuf->m_len;
1566
1567 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1568 *next_to_clean);
1569
1570 return (smbuf);
1571 }
1572 mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1573
1574 /* Unmap the buffer */
1575 bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map);
1576
1577 rx_info->m = NULL;
1578 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean);
1579
1580 return (mbuf);
1581 }
1582
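/*
 * al_eth_rx_recv_work - RX taskqueue handler: drain completed packets from
 * the UDMA, pass them to LRO or if_input(), refill the ring and unmask the
 * queue interrupt.
 */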
1583 static void
1584 al_eth_rx_recv_work(void *arg, int pending)
1585 {
1586 struct al_eth_ring *rx_ring = arg;
1587 struct mbuf *mbuf;
1588 struct lro_entry *queued;
1589 unsigned int qid = rx_ring->ring_id;
1590 struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
1591 uint16_t next_to_clean = rx_ring->next_to_clean;
1592 uint32_t refill_required;
1593 uint32_t refill_actual;
1594 uint32_t do_if_input;
1595
1596 if (napi != 0) {
1597 rx_ring->enqueue_is_running = 1;
1598 al_data_memory_barrier();
1599 }
1600
1601 do {
1602 unsigned int descs;
1603
1604 descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt);
1605 if (unlikely(descs == 0))
1606 break;
1607
1608 device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet "
1609 "from hal. descs %d\n", qid, descs);
1610 device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. "
1611 "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags,
1612 hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx);
1613
1614 /* ignore if detected dma or eth controller errors */
1615 if ((hal_pkt->flags & (AL_ETH_RX_ERROR |
1616 AL_UDMA_CDESC_ERROR)) != 0) {
1617 device_printf(rx_ring->dev, "receive packet with error. "
1618 "flags = 0x%x\n", hal_pkt->flags);
1619 next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1620 next_to_clean, descs);
1621 continue;
1622 }
1623
1624 /* allocate mbuf and fill it */
1625 mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs,
1626 &next_to_clean);
1627
1628 /* exit if we failed to retrieve a buffer */
1629 if (unlikely(mbuf == NULL)) {
1630 next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1631 next_to_clean, descs);
1632 break;
1633 }
1634
1635 if (__predict_true(rx_ring->netdev->if_capenable & IFCAP_RXCSUM ||
1636 rx_ring->netdev->if_capenable & IFCAP_RXCSUM_IPV6)) {
1637 al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf);
1638 }
1639
1640 mbuf->m_pkthdr.flowid = qid;
1641 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
1642
1643 /*
1644 * LRO is only for IP/TCP packets and TCP checksum of the packet
1645 * should be computed by hardware.
1646 */
1647 do_if_input = 1;
1648 if ((rx_ring->lro_enabled != 0) &&
1649 ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
1650 hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) {
			/*
			 * Hand the packet to the stack only when there are
			 * no LRO resources or the LRO enqueue fails.
			 */
1657 if (rx_ring->lro.lro_cnt != 0) {
1658 if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
1659 do_if_input = 0;
1660 }
1661 }
1662
1663 if (do_if_input)
1664 (*rx_ring->netdev->if_input)(rx_ring->netdev, mbuf);
1665
1666 } while (1);
1667
1668 rx_ring->next_to_clean = next_to_clean;
1669
1670 refill_required = al_udma_available_get(rx_ring->dma_q);
1671 refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid,
1672 refill_required);
1673
1674 if (unlikely(refill_actual < refill_required)) {
1675 device_printf_dbg(rx_ring->dev,
1676 "%s: not filling rx queue %d\n", __func__, qid);
1677 }
1678
1679 while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) {
1680 LIST_REMOVE(queued, next);
1681 tcp_lro_flush(&rx_ring->lro, queued);
1682 }
1683
1684 if (napi != 0) {
1685 rx_ring->enqueue_is_running = 0;
1686 al_data_memory_barrier();
1687 }
1688 /* unmask irq */
1689 al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val);
1690 }
1691
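/*
 * al_eth_start_xmit - TX taskqueue handler: drain the ring's buf_ring and
 * transmit each dequeued mbuf; in napi mode a second drain pass catches
 * mbufs enqueued while enqueue_is_running was still set.
 */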
1692 static void
1693 al_eth_start_xmit(void *arg, int pending)
1694 {
1695 struct al_eth_ring *tx_ring = arg;
1696 struct mbuf *mbuf;
1697
1698 if (napi != 0) {
1699 tx_ring->enqueue_is_running = 1;
1700 al_data_memory_barrier();
1701 }
1702
1703 while (1) {
1704 mtx_lock(&tx_ring->br_mtx);
1705 mbuf = drbr_dequeue(NULL, tx_ring->br);
1706 mtx_unlock(&tx_ring->br_mtx);
1707
1708 if (mbuf == NULL)
1709 break;
1710
1711 al_eth_xmit_mbuf(tx_ring, mbuf);
1712 }
1713
1714 if (napi != 0) {
1715 tx_ring->enqueue_is_running = 0;
1716 al_data_memory_barrier();
1717 while (1) {
1718 mtx_lock(&tx_ring->br_mtx);
1719 mbuf = drbr_dequeue(NULL, tx_ring->br);
1720 mtx_unlock(&tx_ring->br_mtx);
1721 if (mbuf == NULL)
1722 break;
1723 al_eth_xmit_mbuf(tx_ring, mbuf);
1724 }
1725 }
1726 }
1727
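/*
 * al_mq_start - if_transmit entry point: select a TX queue from the mbuf
 * flow id (or the current CPU), enqueue on that ring's buf_ring and kick
 * the TX task.
 */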
1728 static int
1729 al_mq_start(struct ifnet *ifp, struct mbuf *m)
1730 {
1731 struct al_eth_adapter *adapter = ifp->if_softc;
1732 struct al_eth_ring *tx_ring;
1733 int i;
1734 int ret;
1735
1736 /* Which queue to use */
1737 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1738 i = m->m_pkthdr.flowid % adapter->num_tx_queues;
1739 else
1740 i = curcpu % adapter->num_tx_queues;
1741
1742 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1743 IFF_DRV_RUNNING) {
1744 return (EFAULT);
1745 }
1746
1747 tx_ring = &adapter->tx_ring[i];
1748
	device_printf_dbg(adapter->dev, "dbg start() - assuming link is active, "
	    "sending packet to queue %d\n", i);
1751
1752 ret = drbr_enqueue(ifp, tx_ring->br, m);
1753
1754 /*
1755 * For napi, if work is not running, schedule it. Always schedule
1756 * for casual (non-napi) packet handling.
1757 */
1758 if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0)))
1759 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
1760
1761 return (ret);
1762 }
1763
1764 static void
1765 al_qflush(struct ifnet * ifp)
1766 {
1767
1768 /* unused */
1769 }
1770
1771 static inline void
1772 al_eth_flow_ctrl_init(struct al_eth_adapter *adapter)
1773 {
1774 uint8_t default_flow_ctrl;
1775
1776 default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE;
1777 default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE;
1778
1779 adapter->link_config.flow_ctrl_supported = default_flow_ctrl;
1780 }
1781
1782 static int
1783 al_eth_flow_ctrl_config(struct al_eth_adapter *adapter)
1784 {
1785 struct al_eth_flow_control_params *flow_ctrl_params;
1786 uint8_t active = adapter->link_config.flow_ctrl_active;
1787 int i;
1788
1789 flow_ctrl_params = &adapter->flow_ctrl_params;
1790
1791 flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
1792 flow_ctrl_params->obay_enable =
1793 ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
1794 flow_ctrl_params->gen_enable =
1795 ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);
1796
1797 flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH;
1798 flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW;
1799 flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA;
1800 flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH;
1801
1802 /* map priority to queue index, queue id = priority/2 */
1803 for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
1804 flow_ctrl_params->prio_q_map[0][i] = 1 << (i >> 1);
1805
1806 al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params);
1807
1808 return (0);
1809 }
1810
1811 static void
1812 al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter)
1813 {
1814
1815 /*
1816 * change the active configuration to the default / force by ethtool
1817 * and call to configure
1818 */
1819 adapter->link_config.flow_ctrl_active =
1820 adapter->link_config.flow_ctrl_supported;
1821
1822 al_eth_flow_ctrl_config(adapter);
1823 }
1824
1825 static void
1826 al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter)
1827 {
1828
1829 adapter->link_config.flow_ctrl_active = 0;
1830 al_eth_flow_ctrl_config(adapter);
1831 }
1832
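/*
 * al_eth_hw_init - full HW bring-up: HAL adapter init, MAC mode and link
 * configuration, MDIO setup and flow control defaults.
 */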
1833 static int
1834 al_eth_hw_init(struct al_eth_adapter *adapter)
1835 {
1836 int rc;
1837
1838 rc = al_eth_hw_init_adapter(adapter);
1839 if (rc != 0)
1840 return (rc);
1841
1842 rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
1843 if (rc < 0) {
1844 device_printf(adapter->dev, "%s failed to configure mac!\n",
1845 __func__);
1846 return (rc);
1847 }
1848
1849 if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
1850 (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
1851 adapter->phy_exist == FALSE)) {
1852 rc = al_eth_mac_link_config(&adapter->hal_adapter,
1853 adapter->link_config.force_1000_base_x,
1854 adapter->link_config.autoneg,
1855 adapter->link_config.active_speed,
1856 adapter->link_config.active_duplex);
1857 if (rc != 0) {
1858 device_printf(adapter->dev,
1859 "%s failed to configure link parameters!\n",
1860 __func__);
1861 return (rc);
1862 }
1863 }
1864
1865 rc = al_eth_mdio_config(&adapter->hal_adapter,
1866 AL_ETH_MDIO_TYPE_CLAUSE_22, TRUE /* shared_mdio_if */,
1867 adapter->ref_clk_freq, adapter->mdio_freq);
1868 if (rc != 0) {
1869 device_printf(adapter->dev, "%s failed at mdio config!\n",
1870 __func__);
1871 return (rc);
1872 }
1873
1874 al_eth_flow_ctrl_init(adapter);
1875
1876 return (rc);
1877 }
1878
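/*
 * al_eth_hw_stop - stop the MAC and the UDMA, flag that a reset is required
 * on the next bring-up and disable flow control so no more pause frames are
 * generated.
 */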
1879 static int
1880 al_eth_hw_stop(struct al_eth_adapter *adapter)
1881 {
1882
1883 al_eth_mac_stop(&adapter->hal_adapter);
1884
1885 /*
1886 * wait till pending rx packets written and UDMA becomes idle,
1887 * the MAC has ~10KB fifo, 10us should be enough time for the
1888 * UDMA to write to the memory
1889 */
1890 DELAY(10);
1891
1892 al_eth_adapter_stop(&adapter->hal_adapter);
1893
1894 adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED;
1895
1896 /* disable flow ctrl to avoid pause packets*/
1897 al_eth_flow_ctrl_disable(adapter);
1898
1899 return (0);
1900 }
1901
/*
 * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts
 * @data: pointer to the driver's private adapter structure
 */
static int
al_eth_intr_intx_all(void *data)
{
	struct al_eth_adapter *adapter = data;

	struct unit_regs __iomem *regs_base =
	    (struct unit_regs __iomem *)adapter->udma_base;
	uint32_t reg;

	reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_A);
	if (likely(reg))
		device_printf_dbg(adapter->dev, "%s group A cause %x\n",
		    __func__, reg);

	if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) {
		struct al_iofic_grp_ctrl __iomem *sec_ints_base;
		uint32_t cause_d = al_udma_iofic_read_cause(regs_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);

		sec_ints_base =
		    &regs_base->gen.interrupt_regs.secondary_iofic_ctrl[0];
		if (cause_d != 0) {
			device_printf_dbg(adapter->dev,
			    "got interrupt from group D. cause %x\n", cause_d);

			cause_d = al_iofic_read_cause(sec_ints_base,
			    AL_INT_GROUP_A);
			device_printf(adapter->dev,
			    "secondary A cause %x\n", cause_d);

			cause_d = al_iofic_read_cause(sec_ints_base,
			    AL_INT_GROUP_B);

			device_printf_dbg(adapter->dev,
			    "secondary B cause %x\n", cause_d);
		}
	}
	if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0) {
		uint32_t cause_b = al_udma_iofic_read_cause(regs_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
		int qid;

		device_printf_dbg(adapter->dev, "group B cause %x\n",
		    cause_b);
		for (qid = 0; qid < adapter->num_rx_queues; qid++) {
			if (cause_b & (1 << qid)) {
				/* mask */
				al_udma_iofic_mask(
				    (struct unit_regs __iomem *)adapter->udma_base,
				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
				    AL_INT_GROUP_B, 1 << qid);
			}
		}
	}
	if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) {
		uint32_t cause_c = al_udma_iofic_read_cause(regs_base,
		    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
		int qid;

		device_printf_dbg(adapter->dev, "group C cause %x\n", cause_c);
		for (qid = 0; qid < adapter->num_tx_queues; qid++) {
			if ((cause_c & (1 << qid)) != 0) {
				al_udma_iofic_mask(
				    (struct unit_regs __iomem *)adapter->udma_base,
				    AL_UDMA_IOFIC_LEVEL_PRIMARY,
				    AL_INT_GROUP_C, 1 << qid);
			}
		}
	}

	al_eth_tx_cmlp_irq_filter(adapter->tx_ring);

	return (0);
}

static int
al_eth_intr_msix_all(void *data)
{
	struct al_eth_adapter *adapter = data;

	device_printf_dbg(adapter->dev, "%s\n", __func__);
	return (0);
}

static int
al_eth_intr_msix_mgmt(void *data)
{
	struct al_eth_adapter *adapter = data;

	device_printf_dbg(adapter->dev, "%s\n", __func__);
	return (0);
}

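/*
 * al_eth_enable_msix - allocate the MSI-X vector table
 *
 * The fixed table layout below reserves entry 2 for the management
 * (group A) interrupt, entries starting at 3 for the Rx completion queues
 * and entries starting at 3 + AL_ETH_MAX_HW_QUEUES for Tx completions.
 * Table entries 0 and 1 are skipped, which is why two extra vectors are
 * requested from pci_alloc_msix().
 */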
static int
al_eth_enable_msix(struct al_eth_adapter *adapter)
{
	int i, msix_vecs, rc, count;

	device_printf_dbg(adapter->dev, "%s\n", __func__);
	msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues;

	device_printf_dbg(adapter->dev,
	    "Try to enable MSIX, vector numbers = %d\n", msix_vecs);

	adapter->msix_entries = malloc(msix_vecs * sizeof(*adapter->msix_entries),
	    M_IFAL, M_ZERO | M_WAITOK);

	if (adapter->msix_entries == NULL) {
		device_printf_dbg(adapter->dev, "failed to allocate"
		    " msix_entries %d\n", msix_vecs);
		rc = ENOMEM;
		goto exit;
	}

	/* management vector (GROUP_A) @2 */
	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;

	/* rx queues start @3 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);

		adapter->msix_entries[irq_idx].entry = 3 + i;
		adapter->msix_entries[irq_idx].vector = 0;
	}
	/* tx queues start @3 + AL_ETH_MAX_HW_QUEUES */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);

		adapter->msix_entries[irq_idx].entry = 3 +
		    AL_ETH_MAX_HW_QUEUES + i;
		adapter->msix_entries[irq_idx].vector = 0;
	}

	count = msix_vecs + 2; /* entries start from 2 */
	rc = pci_alloc_msix(adapter->dev, &count);

	if (rc != 0) {
		device_printf_dbg(adapter->dev, "failed to allocate MSIX "
		    "vectors %d\n", msix_vecs + 2);
		device_printf_dbg(adapter->dev, "ret = %d\n", rc);
		goto msix_entries_exit;
	}

	if (count != msix_vecs + 2) {
		device_printf_dbg(adapter->dev, "failed to allocate all MSIX "
		    "vectors %d, allocated %d\n", msix_vecs + 2, count);
		rc = ENOSPC;
		goto msix_entries_exit;
	}

	for (i = 0; i < msix_vecs; i++)
		adapter->msix_entries[i].vector = 2 + 1 + i;

	device_printf_dbg(adapter->dev, "successfully enabled MSIX,"
	    " vectors %d\n", msix_vecs);

	adapter->msix_vecs = msix_vecs;
	adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED;
	goto exit;

msix_entries_exit:
	adapter->msix_vecs = 0;
	free(adapter->msix_entries, M_IFAL);
	adapter->msix_entries = NULL;

exit:
	return (rc);
}

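/*
 * al_eth_setup_int_mode - populate the driver's interrupt table
 *
 * Depending on how many MSI-X vectors were granted, this selects one of
 * three operating modes: legacy INTx with a single shared handler, a
 * single MSI-X vector for everything, or the normal per-queue MSI-X
 * layout with a dedicated management vector.
 */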
static int
al_eth_setup_int_mode(struct al_eth_adapter *adapter)
{
	int i, rc;

	rc = al_eth_enable_msix(adapter);
	if (rc != 0) {
		device_printf(adapter->dev, "Failed to enable MSIX mode.\n");
		return (rc);
	}

	adapter->irq_vecs = max(1, adapter->msix_vecs);
	/* single INTX mode */
	if (adapter->msix_vecs == 0) {
		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
		    AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s",
		    device_get_name(adapter->dev));
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
		    al_eth_intr_intx_all;
		/* IRQ vector will be resolved from device resources */
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0;
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;

		device_printf(adapter->dev, "%s and vector %d\n", __func__,
		    adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector);

		return (0);
	}
	/* single MSI-X mode */
	if (adapter->msix_vecs == 1) {
		snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
		    AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s",
		    device_get_name(adapter->dev));
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
		    al_eth_intr_msix_all;
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
		    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
		adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;

		return (0);
	}
	/* MSI-X per queue */
	snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE,
	    "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev));
	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt;

	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
	    adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);

		snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE,
		    "al-eth-rx-comp-%d@pci:%s", i,
		    device_get_name(adapter->dev));
		adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter;
		adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i];
		adapter->irq_tbl[irq_idx].vector =
		    adapter->msix_entries[irq_idx].vector;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);

		snprintf(adapter->irq_tbl[irq_idx].name,
		    AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i,
		    device_get_name(adapter->dev));
		adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter;
		adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i];
		adapter->irq_tbl[irq_idx].vector =
		    adapter->msix_entries[irq_idx].vector;
	}

	return (0);
}

static void
__al_eth_free_irq(struct al_eth_adapter *adapter)
{
	struct al_eth_irq *irq;
	int i, rc;

	for (i = 0; i < adapter->irq_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		if (irq->requested != 0) {
			device_printf_dbg(adapter->dev, "tear down irq: %d\n",
			    irq->vector);
			rc = bus_teardown_intr(adapter->dev, irq->res,
			    irq->cookie);
			if (rc != 0)
				device_printf(adapter->dev, "failed to tear "
				    "down irq: %d\n", irq->vector);
		}
		irq->requested = 0;
	}
}

static void
al_eth_free_irq(struct al_eth_adapter *adapter)
{
	struct al_eth_irq *irq;
	int i, rc;
#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif

	__al_eth_free_irq(adapter);

	for (i = 0; i < adapter->irq_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		if (irq->res == NULL)
			continue;
		device_printf_dbg(adapter->dev, "release resource irq: %d\n",
		    irq->vector);
		rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector,
		    irq->res);
		irq->res = NULL;
		if (rc != 0)
			device_printf(adapter->dev, "dev has no parent while "
			    "releasing res for irq: %d\n", irq->vector);
	}

	pci_release_msi(adapter->dev);

	adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;

	adapter->msix_vecs = 0;
	free(adapter->msix_entries, M_IFAL);
	adapter->msix_entries = NULL;
}

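/*
 * al_eth_request_irq - allocate and wire up all interrupt resources
 *
 * For every entry in the irq table this allocates the bus IRQ resource
 * and installs the filter routine recorded by al_eth_setup_int_mode().
 * MSI-X vectors are exclusive, while a legacy INTx line may be shared.
 * On failure the handlers and resources set up so far are unwound in
 * reverse order.
 */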
static int
al_eth_request_irq(struct al_eth_adapter *adapter)
{
	unsigned long flags;
	struct al_eth_irq *irq;
	int rc = 0, i, v;

	if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0)
		flags = RF_ACTIVE;
	else
		flags = RF_ACTIVE | RF_SHAREABLE;

	for (i = 0; i < adapter->irq_vecs; i++) {
		irq = &adapter->irq_tbl[i];

		if (irq->requested != 0)
			continue;

		irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ,
		    &irq->vector, flags);
		if (irq->res == NULL) {
			device_printf(adapter->dev, "could not allocate "
			    "irq vector=%d\n", irq->vector);
			rc = ENXIO;
			goto exit_err;
		}

		if ((rc = bus_setup_intr(adapter->dev, irq->res,
		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler,
		    NULL, irq->data, &irq->cookie)) != 0) {
			device_printf(adapter->dev, "failed to register "
			    "interrupt handler for irq %ju: %d\n",
			    (uintmax_t)rman_get_start(irq->res), rc);
			goto exit_err;
		}
		irq->requested = 1;
	}
	goto exit;

exit_err:
	/*
	 * Unwind in reverse order: tear down the handlers that were set up
	 * (entry i itself never completed), then release every allocated
	 * resource, skipping the slot that failed.
	 */
	for (v = i - 1; v >= 0; v--) {
		irq = &adapter->irq_tbl[v];

		if (irq->requested != 0 &&
		    bus_teardown_intr(adapter->dev, irq->res, irq->cookie) != 0)
			device_printf(adapter->dev, "failed to tear "
			    "down irq: %d\n", irq->vector);
		irq->requested = 0;
	}

	for (v = i; v >= 0; v--) {
		irq = &adapter->irq_tbl[v];
		if (irq->res == NULL)
			continue;
		device_printf_dbg(adapter->dev, "unwind: releasing resource"
		    " for irq %d\n", irq->vector);
		if (bus_release_resource(adapter->dev, SYS_RES_IRQ,
		    irq->vector, irq->res) != 0)
			device_printf(adapter->dev, "dev has no parent while "
			    "releasing res for irq: %d\n", irq->vector);
		irq->res = NULL;
	}

exit:
	return (rc);
}

/**
 * al_eth_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, errno on failure
 **/
static int
al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
{
	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
	device_t dev = tx_ring->dev;
	struct al_udma_q_params *q_params = &tx_ring->q_params;
	int size;
	int ret;

	if (adapter->up)
		return (0);

	size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;

	tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
	if (tx_ring->tx_buffer_info == NULL)
		return (ENOMEM);

	tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
	q_params->size = tx_ring->hw_count;

	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
	    (bus_dmamap_t *)&q_params->desc_phy_base_map,
	    (bus_addr_t *)&q_params->desc_phy_base,
	    (void **)&q_params->desc_base, tx_ring->descs_size);
	if (ret != 0) {
		device_printf(dev, "failed to al_dma_alloc_coherent,"
		    " ret = %d\n", ret);
		return (ENOMEM);
	}

	if (q_params->desc_base == NULL)
		return (ENOMEM);

	device_printf_dbg(dev, "Initializing ring queues %d\n", qid);

	/* Allocate Ring Queue */
	mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
	tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
	    &tx_ring->br_mtx);
	if (tx_ring->br == NULL) {
		device_printf(dev, "Critical Failure setting up buf ring\n");
		return (ENOMEM);
	}

	/* Allocate taskqueues */
	TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
	tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq",
	    device_get_nameunit(adapter->dev));
	TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring);
	tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT,
	    taskqueue_thread_enqueue, &tx_ring->cmpl_tq);
	taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq",
	    device_get_nameunit(adapter->dev));

	/* Setup DMA descriptor areas. */
	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AL_TSO_SIZE,		/* maxsize */
	    AL_ETH_PKT_MAX_BUFS,	/* nsegments */
	    PAGE_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &tx_ring->dma_buf_tag);

	if (ret != 0) {
		device_printf(dev, "Unable to allocate dma_buf_tag, ret = %d\n",
		    ret);
		return (ret);
	}

	for (size = 0; size < tx_ring->sw_count; size++) {
		ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0,
		    &tx_ring->tx_buffer_info[size].dma_map);
		if (ret != 0) {
			device_printf(dev, "Unable to map DMA TX "
			    "buffer memory [iter=%d]\n", size);
			return (ret);
		}
	}

	/* completion queue not used for tx */
	q_params->cdesc_base = NULL;
	/* size in bytes of the udma completion ring descriptor */
	q_params->cdesc_size = 8;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return (0);
}

/*
 * al_eth_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void
al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid)
{
	struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
	struct al_udma_q_params *q_params = &tx_ring->q_params;
	int size;

	/* At this point interrupts' handlers must be deactivated */
	while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL))
		taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task);

	taskqueue_free(tx_ring->cmpl_tq);
	while (taskqueue_cancel(tx_ring->enqueue_tq,
	    &tx_ring->enqueue_task, NULL)) {
		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
	}

	taskqueue_free(tx_ring->enqueue_tq);

	if (tx_ring->br != NULL) {
		drbr_flush(adapter->netdev, tx_ring->br);
		buf_ring_free(tx_ring->br, M_DEVBUF);
	}

	for (size = 0; size < tx_ring->sw_count; size++) {
		m_freem(tx_ring->tx_buffer_info[size].m);
		tx_ring->tx_buffer_info[size].m = NULL;

		bus_dmamap_unload(tx_ring->dma_buf_tag,
		    tx_ring->tx_buffer_info[size].dma_map);
		bus_dmamap_destroy(tx_ring->dma_buf_tag,
		    tx_ring->tx_buffer_info[size].dma_map);
	}
	bus_dma_tag_destroy(tx_ring->dma_buf_tag);

	free(tx_ring->tx_buffer_info, M_IFAL);
	tx_ring->tx_buffer_info = NULL;

	mtx_destroy(&tx_ring->br_mtx);

	/* if not set, then don't free */
	if (q_params->desc_base == NULL)
		return;

	al_dma_free_coherent(q_params->desc_phy_base_tag,
	    q_params->desc_phy_base_map, q_params->desc_base);

	q_params->desc_base = NULL;
}

/*
 * al_eth_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void
al_eth_free_all_tx_resources(struct al_eth_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].q_params.desc_base != NULL)
			al_eth_free_tx_resources(adapter, i);
}

/*
 * al_eth_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, errno on failure
 */
static int
al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
{
	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
	device_t dev = rx_ring->dev;
	struct al_udma_q_params *q_params = &rx_ring->q_params;
	int size;
	int ret;

	size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count;

	/* alloc extra space so in rx path we can always prefetch rx_info + 1 */
	size += 1;

	rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
	if (rx_ring->rx_buffer_info == NULL)
		return (ENOMEM);

	rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
	q_params->size = rx_ring->hw_count;

	ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
	    &q_params->desc_phy_base_map,
	    (bus_addr_t *)&q_params->desc_phy_base,
	    (void **)&q_params->desc_base, rx_ring->descs_size);

	if ((q_params->desc_base == NULL) || (ret != 0))
		return (ENOMEM);

	/* size in bytes of the udma completion ring descriptor */
	q_params->cdesc_size = 16;
	rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size;
	ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag,
	    &q_params->cdesc_phy_base_map,
	    (bus_addr_t *)&q_params->cdesc_phy_base,
	    (void **)&q_params->cdesc_base, rx_ring->cdescs_size);

	if ((q_params->cdesc_base == NULL) || (ret != 0))
		return (ENOMEM);

	/* Allocate taskqueues */
	NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
	rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
	    taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
	taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
	    device_get_nameunit(adapter->dev));

	/* Setup DMA descriptor areas. */
	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AL_TSO_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    AL_TSO_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &rx_ring->dma_buf_tag);

	if (ret != 0) {
		device_printf(dev, "Unable to allocate RX dma_buf_tag\n");
		return (ret);
	}

	for (size = 0; size < rx_ring->sw_count; size++) {
		ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0,
		    &rx_ring->rx_buffer_info[size].dma_map);
		if (ret != 0) {
			device_printf(dev,
			    "Unable to map DMA RX buffer memory\n");
			return (ret);
		}
	}

	/* Zero out the completion descriptor ring */
	memset(q_params->cdesc_base, 0, rx_ring->cdescs_size);

	/* Create LRO for the ring */
	if ((adapter->netdev->if_capenable & IFCAP_LRO) != 0) {
		int err = tcp_lro_init(&rx_ring->lro);
		if (err != 0) {
			device_printf(adapter->dev,
			    "LRO[%d] Initialization failed!\n", qid);
		} else {
			device_printf_dbg(adapter->dev,
			    "RX Soft LRO[%d] Initialized\n", qid);
			rx_ring->lro_enabled = TRUE;
			rx_ring->lro.ifp = adapter->netdev;
		}
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return (0);
}

/*
 * al_eth_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void
al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
{
	struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
	struct al_udma_q_params *q_params = &rx_ring->q_params;
	int size;

	/* At this point interrupts' handlers must be deactivated */
	while (taskqueue_cancel(rx_ring->enqueue_tq,
	    &rx_ring->enqueue_task, NULL)) {
		taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
	}

	taskqueue_free(rx_ring->enqueue_tq);

	for (size = 0; size < rx_ring->sw_count; size++) {
		m_freem(rx_ring->rx_buffer_info[size].m);
		rx_ring->rx_buffer_info[size].m = NULL;
		bus_dmamap_unload(rx_ring->dma_buf_tag,
		    rx_ring->rx_buffer_info[size].dma_map);
		bus_dmamap_destroy(rx_ring->dma_buf_tag,
		    rx_ring->rx_buffer_info[size].dma_map);
	}
	bus_dma_tag_destroy(rx_ring->dma_buf_tag);

	free(rx_ring->rx_buffer_info, M_IFAL);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (q_params->desc_base == NULL)
		return;

	al_dma_free_coherent(q_params->desc_phy_base_tag,
	    q_params->desc_phy_base_map, q_params->desc_base);

	q_params->desc_base = NULL;

	/* if not set, then don't free */
	if (q_params->cdesc_base == NULL)
		return;

	al_dma_free_coherent(q_params->cdesc_phy_base_tag,
	    q_params->cdesc_phy_base_map, q_params->cdesc_base);

	q_params->cdesc_base = NULL;
	q_params->cdesc_phy_base = 0;

	/* Free LRO resources */
	tcp_lro_free(&rx_ring->lro);
}

/*
 * al_eth_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void
al_eth_free_all_rx_resources(struct al_eth_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].q_params.desc_base != NULL)
			al_eth_free_rx_resources(adapter, i);
}

/*
 * al_eth_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * Return 0 on success, errno on failure
 */
static int
al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rc = al_eth_setup_rx_resources(adapter, i);
		if (rc == 0)
			continue;

		device_printf(adapter->dev,
		    "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}
	return (0);

err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		al_eth_free_rx_resources(adapter, i);
	return (rc);
}

/*
 * al_eth_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: private structure
 *
 * Return 0 on success, errno on failure
 */
static int
al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		rc = al_eth_setup_tx_resources(adapter, i);
		if (rc == 0)
			continue;

		device_printf(adapter->dev,
		    "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return (0);

err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		al_eth_free_tx_resources(adapter, i);

	return (rc);
}

static void
al_eth_disable_int_sync(struct al_eth_adapter *adapter)
{

	/* disable forwarding interrupts from eth through pci end point */
	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
	    (adapter->board_type == ALPINE_NIC)) {
		al_eth_forward_int_config((uint32_t *)adapter->internal_pcie_base +
		    AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR);
	}

	/* mask hw interrupts */
	al_eth_interrupts_mask(adapter);
}

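/*
 * al_eth_interrupts_unmask - open the interrupt gates we actually use
 *
 * Group A carries the summary bits, group B has one bit per Rx queue and
 * group C one bit per Tx queue, so the masks below are built from the
 * configured queue counts. In legacy INTx mode the group B/C/D summary
 * bits must also be unmasked in group A, since everything funnels into a
 * single interrupt line.
 */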
static void
al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
{
	uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */
	uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1; /* bit per Rx q */
	uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1; /* bit per Tx q */
	uint32_t group_d_mask = 3 << 8;
	struct unit_regs __iomem *regs_base =
	    (struct unit_regs __iomem *)adapter->udma_base;

	if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
		group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
		    AL_INT_GROUP_A_GROUP_C_SUM |
		    AL_INT_GROUP_A_GROUP_D_SUM;

	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_A, group_a_mask);
	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_B, group_b_mask);
	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_C, group_c_mask);
	al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_D, group_d_mask);
}

static void
al_eth_interrupts_mask(struct al_eth_adapter *adapter)
{
	struct unit_regs __iomem *regs_base =
	    (struct unit_regs __iomem *)adapter->udma_base;

	/* mask all interrupts */
	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_A, AL_MASK_GROUP_A_INT);
	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_B, AL_MASK_GROUP_B_INT);
	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_C, AL_MASK_GROUP_C_INT);
	al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
	    AL_INT_GROUP_D, AL_MASK_GROUP_D_INT);
}

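/*
 * al_eth_configure_int_mode - program the UDMA interrupt controller
 *
 * Chooses the iofic operating mode from the number of MSI-X vectors that
 * were granted (legacy INTx when none, per-queue MSI-X otherwise; a single
 * shared MSI-X vector is not supported by the UDMA) and configures the
 * error/abort masks and the 15us moderation resolution used as the base
 * unit for interrupt coalescing.
 */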
static int
al_eth_configure_int_mode(struct al_eth_adapter *adapter)
{
	enum al_iofic_mode int_mode;
	uint32_t m2s_errors_disable = AL_M2S_MASK_INIT;
	uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT;
	uint32_t s2m_errors_disable = AL_S2M_MASK_INIT;
	uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT;

	/* single INTX mode */
	if (adapter->msix_vecs == 0)
		int_mode = AL_IOFIC_MODE_LEGACY;
	else if (adapter->msix_vecs > 1)
		int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
	else {
		device_printf(adapter->dev,
		    "udma doesn't support single MSI-X mode yet.\n");
		return (EIO);
	}

	if (adapter->board_type != ALPINE_INTEGRATED) {
		m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
		m2s_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
		s2m_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
		s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
	}

	if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
	    int_mode, m2s_errors_disable, m2s_aborts_disable,
	    s2m_errors_disable, s2m_aborts_disable)) {
		device_printf(adapter->dev,
		    "al_udma_unit_int_config failed!\n");
		return (EIO);
	}
	adapter->int_mode = int_mode;
	device_printf_dbg(adapter->dev, "using %s interrupt mode\n",
	    int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" :
	    int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" :
	    "Unknown");
	/* set interrupt moderation resolution to 15us */
	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->
	    gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15);
	al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->
	    gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15);
	/* by default interrupt coalescing is disabled */
	adapter->tx_usecs = 0;
	adapter->rx_usecs = 0;

	return (0);
}

/*
 * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
 * @index: Index in RX flow hash indirection table
 * @n_rx_rings: Number of RX rings to use
 *
 * This function provides the default policy for RX flow hash indirection.
 */
static inline uint32_t
ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
{

	return (index % n_rx_rings);
}

static void *
al_eth_update_stats(struct al_eth_adapter *adapter)
{
	struct al_eth_mac_stats *mac_stats = &adapter->mac_stats;

	if (adapter->up == 0)
		return (NULL);

	al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats);

	return (NULL);
}

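/*
 * al_get_counter - if_get_counter handler
 *
 * Maps the generic ifnet counters onto the MAC statistics block kept up
 * to date by the stats callout; anything not tracked by the MAC falls
 * through to if_get_counter_default(). Input errors aggregate the
 * malformed-frame counters (undersize, fragment, jabber, oversize, FCS
 * and alignment errors) on top of ifInErrors.
 */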
static uint64_t
al_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct al_eth_adapter *adapter;
	struct al_eth_mac_stats *mac_stats;
	uint64_t rv;

	adapter = if_getsoftc(ifp);
	mac_stats = &adapter->mac_stats;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (mac_stats->aFramesReceivedOK); /* including pause frames */
	case IFCOUNTER_OPACKETS:
		return (mac_stats->aFramesTransmittedOK);
	case IFCOUNTER_IBYTES:
		return (mac_stats->aOctetsReceivedOK);
	case IFCOUNTER_OBYTES:
		return (mac_stats->aOctetsTransmittedOK);
	case IFCOUNTER_IMCASTS:
		return (mac_stats->ifInMulticastPkts);
	case IFCOUNTER_OMCASTS:
		return (mac_stats->ifOutMulticastPkts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (mac_stats->etherStatsDropEvents);
	case IFCOUNTER_IERRORS:
		rv = mac_stats->ifInErrors +
		    mac_stats->etherStatsUndersizePkts + /* good but short */
		    mac_stats->etherStatsFragments + /* short and bad */
		    mac_stats->etherStatsJabbers + /* with crc errors */
		    mac_stats->etherStatsOversizePkts +
		    mac_stats->aFrameCheckSequenceErrors +
		    mac_stats->aAlignmentErrors;
		return (rv);
	case IFCOUNTER_OERRORS:
		return (mac_stats->ifOutErrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static u_int
al_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	unsigned char *mac;

	mac = LLADDR(sdl);
	/* default mc address inside mac address */
	if (mac[3] != 0 && mac[4] != 0 && mac[5] != 1)
		return (1);
	else
		return (0);
}

static u_int
al_program_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct al_eth_adapter *adapter = arg;

	al_eth_mac_table_unicast_add(adapter,
	    AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 + cnt, 1);

	return (1);
}

/*
 * Unicast, Multicast and Promiscuous mode set
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void
al_eth_set_rx_mode(struct al_eth_adapter *adapter)
{
	struct ifnet *ifp = adapter->netdev;
	int mc, uc;
	uint8_t i;

	/* XXXGL: why generic count won't work? */
	mc = if_foreach_llmaddr(ifp, al_count_maddr, NULL);
	uc = if_lladdr_count(ifp);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		al_eth_mac_table_promiscuous_set(adapter, true);
	} else {
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			/*
			 * This interface is in all-multicasts mode
			 * (used by multicast routers).
			 */
			al_eth_mac_table_all_multicast_add(adapter,
			    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
		} else {
			if (mc == 0) {
				al_eth_mac_table_entry_clear(adapter,
				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
			} else {
				al_eth_mac_table_all_multicast_add(adapter,
				    AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
			}
		}
		if (uc != 0) {
			i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
			if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) {
				/*
				 * In this case there are more addresses than
				 * entries in the mac table - set promiscuous
				 */
				al_eth_mac_table_promiscuous_set(adapter, true);
				return;
			}

			/* clear the last configuration */
			while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE +
			    AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
				al_eth_mac_table_entry_clear(adapter, i);
				i++;
			}

			/* set new addresses */
			if_foreach_lladdr(ifp, al_program_addr, adapter);
		}
		al_eth_mac_table_promiscuous_set(adapter, false);
	}
}

static void
al_eth_config_rx_fwd(struct al_eth_adapter *adapter)
{
	struct al_eth_fwd_ctrl_table_entry entry;
	int i;

	/* let priority be equal to pbits */
	for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++)
		al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i);

	/* map priority to queue index, queue id = priority/2 */
	for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
		al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1);

	entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0;
	entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE;
	entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO;
	entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE;
	entry.filter = FALSE;

	al_eth_ctrl_table_def_set(&adapter->hal_adapter, FALSE, &entry);

	/*
	 * By default set the mac table to forward all unicast packets to our
	 * MAC address and all broadcast; all the rest will be dropped.
	 */
	al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
	    1);
	al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1);
	al_eth_mac_table_promiscuous_set(adapter, false);

	/* set toeplitz hash keys */
	for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++)
		*((uint8_t *)adapter->toeplitz_hash_key + i) = (uint8_t)random();

	for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++)
		al_eth_hash_key_set(&adapter->hal_adapter, i,
		    htonl(adapter->toeplitz_hash_key[i]));

	for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) {
		adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i,
		    AL_ETH_NUM_QUEUES);
		al_eth_set_thash_table_entry(adapter, i, 0,
		    adapter->rss_ind_tbl[i]);
	}

	al_eth_fsm_table_init(adapter);
}

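/*
 * al_eth_req_rx_buff_size - pick the rx mbuf cluster size for a frame size
 *
 * Walks the cluster ladder MCLBYTES (2KB) -> MJUMPAGESIZE -> MJUM9BYTES ->
 * MJUM16BYTES, stopping at the first size that fits the requested frame
 * and never exceeding the board's max_rx_buff_alloc_size. For example, a
 * 4000-byte frame selects MJUMPAGESIZE clusters when the limit allows it.
 */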
static void
al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size)
{

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo frames
	 * Try from the smallest up to maximum supported
	 */
	adapter->rx_mbuf_sz = MCLBYTES;
	if (size > 2048) {
		if (adapter->max_rx_buff_alloc_size > 2048)
			adapter->rx_mbuf_sz = MJUMPAGESIZE;
		else
			return;
	}
	if (size > 4096) {
		if (adapter->max_rx_buff_alloc_size > 4096)
			adapter->rx_mbuf_sz = MJUM9BYTES;
		else
			return;
	}
	if (size > 9216) {
		if (adapter->max_rx_buff_alloc_size > 9216)
			adapter->rx_mbuf_sz = MJUM16BYTES;
		else
			return;
	}
}

static int
al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu)
{
	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN;

	al_eth_req_rx_buff_size(adapter, new_mtu);

	device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu);
	al_eth_rx_pkt_limit_config(&adapter->hal_adapter,
	    AL_ETH_MIN_FRAME_LEN, max_frame);

	al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100);

	return (0);
}

static int
al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu)
{
	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN;

	if ((new_mtu < AL_ETH_MIN_FRAME_LEN) ||
	    (max_frame > AL_ETH_MAX_FRAME_LEN)) {
		return (EINVAL);
	}

	return (0);
}

static int
al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type,
    int qid)
{
	int rc = 0;
	char *name = (type == UDMA_TX) ? "Tx" : "Rx";
	struct al_udma_q_params *q_params;

	if (type == UDMA_TX)
		q_params = &adapter->tx_ring[qid].q_params;
	else
		q_params = &adapter->rx_ring[qid].q_params;

	rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params);
	if (rc < 0) {
		device_printf(adapter->dev, "config %s queue %u failed\n", name,
		    qid);
		return (rc);
	}
	return (rc);
}

static int
al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		al_eth_udma_queue_enable(adapter, UDMA_TX, i);

	for (i = 0; i < adapter->num_rx_queues; i++)
		al_eth_udma_queue_enable(adapter, UDMA_RX, i);

	return (0);
}

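/*
 * al_eth_up_complete - final stage of bringing the interface up
 *
 * Ordering matters here: the interrupt mode and rx forwarding tables are
 * programmed first, the UDMA queues are enabled and the rx rings refilled
 * before interrupts are unmasked, and the MAC is started last so that no
 * traffic can arrive while the datapath is still being assembled.
 */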
static void
al_eth_up_complete(struct al_eth_adapter *adapter)
{

	al_eth_configure_int_mode(adapter);
	al_eth_config_rx_fwd(adapter);
	al_eth_change_mtu(adapter, adapter->netdev->if_mtu);
	al_eth_udma_queues_enable_all(adapter);
	al_eth_refill_all_rx_bufs(adapter);
	al_eth_interrupts_unmask(adapter);

	/* enable forwarding interrupts from eth through pci end point */
	if ((adapter->board_type == ALPINE_FPGA_NIC) ||
	    (adapter->board_type == ALPINE_NIC)) {
		al_eth_forward_int_config((uint32_t *)adapter->internal_pcie_base +
		    AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR);
	}

	al_eth_flow_ctrl_enable(adapter);

	mtx_lock(&adapter->stats_mtx);
	callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void *)adapter);
	mtx_unlock(&adapter->stats_mtx);

	al_eth_mac_start(&adapter->hal_adapter);
}

static int
al_media_update(struct ifnet *ifp)
{
	struct al_eth_adapter *adapter = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) != 0)
		mii_mediachg(adapter->mii);

	return (0);
}

static void
al_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct al_eth_adapter *sc = ifp->if_softc;
	struct mii_data *mii;

	if (sc->mii == NULL) {
		ifmr->ifm_active = IFM_ETHER | IFM_NONE;
		ifmr->ifm_status = 0;

		return;
	}

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
al_tick(void *arg)
{
	struct al_eth_adapter *adapter = arg;

	mii_tick(adapter->mii);

	/* Schedule another timeout one second from now */
	callout_schedule(&adapter->wd_callout, hz);
}

static void
al_tick_stats(void *arg)
{
	struct al_eth_adapter *adapter = arg;

	al_eth_update_stats(adapter);

	callout_schedule(&adapter->stats_callout, hz);
}

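/*
 * al_eth_up - bring the interface from down to fully operational
 *
 * Performs a function reset when one is pending, derives the hardware
 * checksum/TSO assist flags from the current capabilities, then walks
 * through serdes, MAC/MDIO, interrupt mode, ring and IRQ setup. The
 * error labels unwind in exact reverse order of that sequence.
 */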
static int
al_eth_up(struct al_eth_adapter *adapter)
{
	struct ifnet *ifp = adapter->netdev;
	int rc;

	if (adapter->up)
		return (0);

	if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) {
		al_eth_function_reset(adapter);
		adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED;
	}

	ifp->if_hwassist = 0;
	if ((ifp->if_capenable & IFCAP_TSO) != 0)
		ifp->if_hwassist |= CSUM_TSO;
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) != 0)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	al_eth_serdes_init(adapter);

	rc = al_eth_hw_init(adapter);
	if (rc != 0)
		goto err_hw_init_open;

	rc = al_eth_setup_int_mode(adapter);
	if (rc != 0) {
		device_printf(adapter->dev,
		    "%s failed at setup interrupt mode!\n", __func__);
		goto err_setup_int;
	}

	/* allocate transmit descriptors */
	rc = al_eth_setup_all_tx_resources(adapter);
	if (rc != 0)
		goto err_setup_tx;

	/* allocate receive descriptors */
	rc = al_eth_setup_all_rx_resources(adapter);
	if (rc != 0)
		goto err_setup_rx;

	rc = al_eth_request_irq(adapter);
	if (rc != 0)
		goto err_req_irq;

	al_eth_up_complete(adapter);

	adapter->up = true;

	if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial)
		adapter->netdev->if_link_state = LINK_STATE_UP;

	if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
		mii_mediachg(adapter->mii);

		/* Schedule watchdog timeout */
		mtx_lock(&adapter->wd_mtx);
		callout_reset(&adapter->wd_callout, hz, al_tick, adapter);
		mtx_unlock(&adapter->wd_mtx);

		mii_pollstat(adapter->mii);
	}

	return (rc);

err_req_irq:
	al_eth_free_all_rx_resources(adapter);
err_setup_rx:
	al_eth_free_all_tx_resources(adapter);
err_setup_tx:
	al_eth_free_irq(adapter);
err_setup_int:
	al_eth_hw_stop(adapter);
err_hw_init_open:
	al_eth_function_reset(adapter);

	return (rc);
}

static int
al_shutdown(device_t dev)
{
	struct al_eth_adapter *adapter = device_get_softc(dev);

	al_eth_down(adapter);

	return (0);
}

static void
al_eth_down(struct al_eth_adapter *adapter)
{

	device_printf_dbg(adapter->dev, "al_eth_down: begin\n");

	adapter->up = false;

	mtx_lock(&adapter->wd_mtx);
	callout_stop(&adapter->wd_callout);
	mtx_unlock(&adapter->wd_mtx);

	al_eth_disable_int_sync(adapter);

	mtx_lock(&adapter->stats_mtx);
	callout_stop(&adapter->stats_callout);
	mtx_unlock(&adapter->stats_mtx);

	al_eth_free_irq(adapter);
	al_eth_hw_stop(adapter);

	al_eth_free_all_tx_resources(adapter);
	al_eth_free_all_rx_resources(adapter);
}

static int
al_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct al_eth_adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
	{
		error = al_eth_check_mtu(adapter, ifr->ifr_mtu);
		if (error != 0) {
			device_printf(adapter->dev, "ioctl wrong mtu %d\n",
			    ifr->ifr_mtu);
			break;
		}

		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		adapter->netdev->if_mtu = ifr->ifr_mtu;
		al_init(adapter);
		break;
	}
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					device_printf_dbg(adapter->dev,
					    "ioctl promisc/allmulti\n");
					al_eth_set_rx_mode(adapter);
				}
			} else {
				error = al_eth_up(adapter);
				if (error == 0)
					ifp->if_drv_flags |= IFF_DRV_RUNNING;
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				al_eth_down(adapter);
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			}
		}

		adapter->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			device_printf_dbg(adapter->dev,
			    "ioctl add/del multi before\n");
			al_eth_set_rx_mode(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (adapter->mii != NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &adapter->mii->mii_media, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask, reinit;

		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				/*
				 * No polling handler is registered here; a
				 * complete implementation would call
				 * ether_poll_register() and check its result
				 * before setting the flag.
				 */
				ifp->if_capenable |= IFCAP_POLLING;
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				ifp->if_capenable &= ~IFCAP_POLLING;
			}
		}
#endif
		if ((mask & IFCAP_HWCSUM) != 0) {
			/* apply to both rx and tx */
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if ((mask & IFCAP_HWCSUM_IPV6) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM_IPV6;
			reinit = 1;
		}
		if ((mask & IFCAP_TSO) != 0) {
			ifp->if_capenable ^= IFCAP_TSO;
			reinit = 1;
		}
		if ((mask & IFCAP_LRO) != 0) {
			ifp->if_capenable ^= IFCAP_LRO;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if ((mask & IFCAP_VLAN_HWFILTER) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
			reinit = 1;
		}
		if ((reinit != 0) &&
		    ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)) {
			al_init(adapter);
		}
		break;
	}

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
al_is_device_supported(device_t dev)
{
	uint16_t pci_vendor_id = pci_get_vendor(dev);
	uint16_t pci_device_id = pci_get_device(dev);

	return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS &&
	    (pci_device_id == PCI_DEVICE_ID_AL_ETH ||
	    pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED ||
	    pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC ||
	    pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC));
}

/* Time in mSec to keep trying to read / write from MDIO in case of error */
#define MDIO_TIMEOUT_MSEC	100
#define MDIO_PAUSE_MSEC		10

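/*
 * al_miibus_readreg - miibus read entry point
 *
 * Retries the HAL MDIO read every MDIO_PAUSE_MSEC until it succeeds or
 * MDIO_TIMEOUT_MSEC has elapsed; on timeout the last (zero-initialized)
 * value is returned, since miibus read methods have no error channel.
 */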
static int
al_miibus_readreg(device_t dev, int phy, int reg)
{
	struct al_eth_adapter *adapter = device_get_softc(dev);
	uint16_t value = 0;
	int rc;
	int timeout = MDIO_TIMEOUT_MSEC;

	while (timeout > 0) {
		rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr,
		    -1, reg, &value);

		if (rc == 0)
			return (value);

		device_printf_dbg(adapter->dev,
		    "mdio read failed. try again in 10 msec\n");

		timeout -= MDIO_PAUSE_MSEC;
		pause("readreg pause", MDIO_PAUSE_MSEC);
	}

	if (rc != 0)
		device_printf(adapter->dev, "MDIO read failed on timeout\n");

	return (value);
}

static int
al_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct al_eth_adapter *adapter = device_get_softc(dev);
	int rc;
	int timeout = MDIO_TIMEOUT_MSEC;

	while (timeout > 0) {
		rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr,
		    -1, reg, value);

		if (rc == 0)
			return (0);

		device_printf(adapter->dev,
		    "mdio write failed. try again in 10 msec\n");

		timeout -= MDIO_PAUSE_MSEC;
		pause("miibus writereg", MDIO_PAUSE_MSEC);
	}

	if (rc != 0)
		device_printf(adapter->dev, "MDIO write failed on timeout\n");

	return (rc);
}

static void
al_miibus_statchg(device_t dev)
{
	struct al_eth_adapter *adapter = device_get_softc(dev);

	device_printf_dbg(adapter->dev,
	    "al_miibus_statchg: state has changed!\n");
	device_printf_dbg(adapter->dev,
	    "al_miibus_statchg: active = 0x%x status = 0x%x\n",
	    adapter->mii->mii_media_active, adapter->mii->mii_media_status);

	if (adapter->up == 0)
		return;

	if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) {
		if ((adapter->mii->mii_media_status & IFM_ACTIVE) != 0) {
			device_printf(adapter->dev, "link is UP\n");
			adapter->netdev->if_link_state = LINK_STATE_UP;
		} else {
			device_printf(adapter->dev, "link is DOWN\n");
			adapter->netdev->if_link_state = LINK_STATE_DOWN;
		}
	}
}

static void
al_miibus_linkchg(device_t dev)
{
	struct al_eth_adapter *adapter = device_get_softc(dev);
	uint8_t duplex = 0;
	uint8_t speed = 0;

	if (adapter->mii == NULL)
		return;

	if ((adapter->netdev->if_flags & IFF_UP) == 0)
		return;

	/* Ignore link changes when link is not ready */
	if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
	    (IFM_AVALID | IFM_ACTIVE)) {
		return;
	}

	if ((adapter->mii->mii_media_active & IFM_FDX) != 0)
		duplex = 1;

	speed = IFM_SUBTYPE(adapter->mii->mii_media_active);

	if (speed == IFM_10_T) {
		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
		    AL_10BASE_T_SPEED, duplex);
		return;
	}

	if (speed == IFM_100_TX) {
		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
		    AL_100BASE_TX_SPEED, duplex);
		return;
	}

	if (speed == IFM_1000_T) {
		al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
		    AL_1000BASE_T_SPEED, duplex);
		return;
	}

	device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n",
	    adapter->mii->mii_media_active);
}