FreeBSD/Linux Kernel Cross Reference
sys/dev/ale/if_ale.c
1 /*-
2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 /* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: releng/8.3/sys/dev/ale/if_ale.c 229058 2011-12-31 01:08:31Z yongari $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <sys/module.h>
41 #include <sys/rman.h>
42 #include <sys/queue.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46 #include <sys/taskqueue.h>
47
48 #include <net/bpf.h>
49 #include <net/if.h>
50 #include <net/if_arp.h>
51 #include <net/ethernet.h>
52 #include <net/if_dl.h>
53 #include <net/if_llc.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56 #include <net/if_vlan_var.h>
57
58 #include <netinet/in.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62
63 #include <dev/mii/mii.h>
64 #include <dev/mii/miivar.h>
65
66 #include <dev/pci/pcireg.h>
67 #include <dev/pci/pcivar.h>
68
69 #include <machine/bus.h>
70 #include <machine/in_cksum.h>
71
72 #include <dev/ale/if_alereg.h>
73 #include <dev/ale/if_alevar.h>
74
75 /* "device miibus" required. See GENERIC if you get errors here. */
76 #include "miibus_if.h"
77
/* For more information about Tx checksum offload issues see ale_encap(). */
#define	ALE_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

/* Module dependencies: PCI bus, Ethernet layer and the MII PHY bus. */
MODULE_DEPEND(ale, pci, 1, 1, 1);
MODULE_DEPEND(ale, ether, 1, 1, 1);
MODULE_DEPEND(ale, miibus, 1, 1, 1);

/* Tunables. */
/* Set to non-zero from loader.conf to force legacy INTx interrupts. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.ale.msi_disable", &msi_disable);
TUNABLE_INT("hw.ale.msix_disable", &msix_disable);
90
/*
 * Devices supported by this driver.
 * Probed by vendor/device ID in ale_probe(); all three AR81xx variants
 * share a single PCI device ID.
 */
static struct ale_dev {
	uint16_t	ale_vendorid;	/* PCI vendor ID */
	uint16_t	ale_deviceid;	/* PCI device ID */
	const char	*ale_name;	/* probe description string */
} ale_devs[] = {
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR81XX,
	    "Atheros AR8121/AR8113/AR8114 PCIe Ethernet" },
};
102
103 static int ale_attach(device_t);
104 static int ale_check_boundary(struct ale_softc *);
105 static int ale_detach(device_t);
106 static int ale_dma_alloc(struct ale_softc *);
107 static void ale_dma_free(struct ale_softc *);
108 static void ale_dmamap_cb(void *, bus_dma_segment_t *, int, int);
109 static int ale_encap(struct ale_softc *, struct mbuf **);
110 static void ale_get_macaddr(struct ale_softc *);
111 static void ale_init(void *);
112 static void ale_init_locked(struct ale_softc *);
113 static void ale_init_rx_pages(struct ale_softc *);
114 static void ale_init_tx_ring(struct ale_softc *);
115 static void ale_int_task(void *, int);
116 static int ale_intr(void *);
117 static int ale_ioctl(struct ifnet *, u_long, caddr_t);
118 static void ale_link_task(void *, int);
119 static void ale_mac_config(struct ale_softc *);
120 static int ale_miibus_readreg(device_t, int, int);
121 static void ale_miibus_statchg(device_t);
122 static int ale_miibus_writereg(device_t, int, int, int);
123 static int ale_mediachange(struct ifnet *);
124 static void ale_mediastatus(struct ifnet *, struct ifmediareq *);
125 static void ale_phy_reset(struct ale_softc *);
126 static int ale_probe(device_t);
127 static void ale_reset(struct ale_softc *);
128 static int ale_resume(device_t);
129 static void ale_rx_update_page(struct ale_softc *, struct ale_rx_page **,
130 uint32_t, uint32_t *);
131 static void ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t);
132 static int ale_rxeof(struct ale_softc *sc, int);
133 static void ale_rxfilter(struct ale_softc *);
134 static void ale_rxvlan(struct ale_softc *);
135 static void ale_setlinkspeed(struct ale_softc *);
136 static void ale_setwol(struct ale_softc *);
137 static int ale_shutdown(device_t);
138 static void ale_start(struct ifnet *);
139 static void ale_start_locked(struct ifnet *);
140 static void ale_stats_clear(struct ale_softc *);
141 static void ale_stats_update(struct ale_softc *);
142 static void ale_stop(struct ale_softc *);
143 static void ale_stop_mac(struct ale_softc *);
144 static int ale_suspend(device_t);
145 static void ale_sysctl_node(struct ale_softc *);
146 static void ale_tick(void *);
147 static void ale_txeof(struct ale_softc *);
148 static void ale_watchdog(struct ale_softc *);
149 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
150 static int sysctl_hw_ale_proc_limit(SYSCTL_HANDLER_ARGS);
151 static int sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS);
152
/* newbus method table: device lifecycle plus MII register accessors. */
static device_method_t ale_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		ale_probe),
	DEVMETHOD(device_attach,	ale_attach),
	DEVMETHOD(device_detach,	ale_detach),
	DEVMETHOD(device_shutdown,	ale_shutdown),
	DEVMETHOD(device_suspend,	ale_suspend),
	DEVMETHOD(device_resume,	ale_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	ale_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ale_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ale_miibus_statchg),

	{ NULL, NULL }
};

static driver_t ale_driver = {
	"ale",
	ale_methods,
	sizeof(struct ale_softc)
};

static devclass_t ale_devclass;

/* Register the driver on the PCI bus and hang miibus underneath it. */
DRIVER_MODULE(ale, pci, ale_driver, ale_devclass, 0, 0);
DRIVER_MODULE(miibus, ale, miibus_driver, miibus_devclass, 0, 0);

/* Single memory BAR at PCIR_BAR(0). */
static struct resource_spec ale_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

/* Legacy INTx: shareable IRQ 0. */
static struct resource_spec ale_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

/* MSI: single exclusive message (rid 1). */
static struct resource_spec ale_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

/* MSI-X: single exclusive message (rid 1). */
static struct resource_spec ale_irq_spec_msix[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};
200
201 static int
202 ale_miibus_readreg(device_t dev, int phy, int reg)
203 {
204 struct ale_softc *sc;
205 uint32_t v;
206 int i;
207
208 sc = device_get_softc(dev);
209
210 CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
211 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
212 for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
213 DELAY(5);
214 v = CSR_READ_4(sc, ALE_MDIO);
215 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
216 break;
217 }
218
219 if (i == 0) {
220 device_printf(sc->ale_dev, "phy read timeout : %d\n", reg);
221 return (0);
222 }
223
224 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
225 }
226
227 static int
228 ale_miibus_writereg(device_t dev, int phy, int reg, int val)
229 {
230 struct ale_softc *sc;
231 uint32_t v;
232 int i;
233
234 sc = device_get_softc(dev);
235
236 CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
237 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
238 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
239 for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
240 DELAY(5);
241 v = CSR_READ_4(sc, ALE_MDIO);
242 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
243 break;
244 }
245
246 if (i == 0)
247 device_printf(sc->ale_dev, "phy write timeout : %d\n", reg);
248
249 return (0);
250 }
251
252 static void
253 ale_miibus_statchg(device_t dev)
254 {
255 struct ale_softc *sc;
256
257 sc = device_get_softc(dev);
258
259 taskqueue_enqueue(taskqueue_swi, &sc->ale_link_task);
260 }
261
262 static void
263 ale_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
264 {
265 struct ale_softc *sc;
266 struct mii_data *mii;
267
268 sc = ifp->if_softc;
269 ALE_LOCK(sc);
270 mii = device_get_softc(sc->ale_miibus);
271
272 mii_pollstat(mii);
273 ifmr->ifm_status = mii->mii_media_status;
274 ifmr->ifm_active = mii->mii_media_active;
275 ALE_UNLOCK(sc);
276 }
277
278 static int
279 ale_mediachange(struct ifnet *ifp)
280 {
281 struct ale_softc *sc;
282 struct mii_data *mii;
283 struct mii_softc *miisc;
284 int error;
285
286 sc = ifp->if_softc;
287 ALE_LOCK(sc);
288 mii = device_get_softc(sc->ale_miibus);
289 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
290 mii_phy_reset(miisc);
291 error = mii_mediachg(mii);
292 ALE_UNLOCK(sc);
293
294 return (error);
295 }
296
297 static int
298 ale_probe(device_t dev)
299 {
300 struct ale_dev *sp;
301 int i;
302 uint16_t vendor, devid;
303
304 vendor = pci_get_vendor(dev);
305 devid = pci_get_device(dev);
306 sp = ale_devs;
307 for (i = 0; i < sizeof(ale_devs) / sizeof(ale_devs[0]); i++) {
308 if (vendor == sp->ale_vendorid &&
309 devid == sp->ale_deviceid) {
310 device_set_desc(dev, sp->ale_name);
311 return (BUS_PROBE_DEFAULT);
312 }
313 sp++;
314 }
315
316 return (ENXIO);
317 }
318
/*
 * Load the station address into sc->ale_eaddr.
 *
 * If the device advertises a PCI VPD capability, the TWSI engine is
 * told to reload the EEPROM first, which (re)programs the PAR0/PAR1
 * station-address registers; otherwise whatever the registers already
 * hold is used.
 */
static void
ale_get_macaddr(struct ale_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	/* Make sure SPI-based VPD access is disabled before using TWSI. */
	reg = CSR_READ_4(sc, ALE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, ALE_SPI_CTRL, reg);
	}

	if (pci_find_extcap(sc->ale_dev, PCIY_VPD, &vpdc) == 0) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set ethernet address of controller.
		 */
		CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		/* Hardware clears SW_LD_START when the reload completes;
		 * wait up to ~100ms. */
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->ale_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->ale_dev,
			    "PCI VPD capability not found!\n");
	}

	/*
	 * The 6-byte address is split across two registers:
	 * PAR1 holds the two most significant bytes, PAR0 the rest.
	 */
	ea[0] = CSR_READ_4(sc, ALE_PAR0);
	ea[1] = CSR_READ_4(sc, ALE_PAR1);
	sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF;
}
362
/*
 * Reset the PHY and program a set of vendor "magic" debug-port values
 * (hibernation, class A/B, power tuning).  The sequence mirrors the
 * Linux atl1e driver.
 */
static void
ale_phy_reset(struct ale_softc *sc)
{

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
	    GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);
	/* Release the external reset while keeping the other bits set. */
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE |
	    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);

	/*
	 * Atheros PHY debug-port access: write the sub-register index to
	 * ATPHY_DBG_ADDR, then the value to ATPHY_DBG_DATA.
	 */
#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E

	/* Enable hibernation mode. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x0B);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0xBC00);
	/* Set Class A/B for all modes. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x00);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x02EF);
	/* Enable 10BT power saving. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x12);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x4C04);
	/* Adjust 1000T power. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x04);
	/*
	 * NOTE(review): the value writes below use ATPHY_DBG_ADDR where
	 * the earlier pairs use ATPHY_DBG_DATA.  The same pattern appears
	 * in sibling BSD ale drivers, so it is preserved here, but it
	 * looks like a copy-paste quirk -- confirm against the PHY
	 * datasheet / Linux atl1e before "fixing".
	 */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x8BBB);
	/* 10BT center tap voltage. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x05);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x2C46);

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
	DELAY(1000);
}
410
411 static int
412 ale_attach(device_t dev)
413 {
414 struct ale_softc *sc;
415 struct ifnet *ifp;
416 uint16_t burst;
417 int error, i, msic, msixc, pmc;
418 uint32_t rxf_len, txf_len;
419
420 error = 0;
421 sc = device_get_softc(dev);
422 sc->ale_dev = dev;
423
424 mtx_init(&sc->ale_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
425 MTX_DEF);
426 callout_init_mtx(&sc->ale_tick_ch, &sc->ale_mtx, 0);
427 TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc);
428 TASK_INIT(&sc->ale_link_task, 0, ale_link_task, sc);
429
430 /* Map the device. */
431 pci_enable_busmaster(dev);
432 sc->ale_res_spec = ale_res_spec_mem;
433 sc->ale_irq_spec = ale_irq_spec_legacy;
434 error = bus_alloc_resources(dev, sc->ale_res_spec, sc->ale_res);
435 if (error != 0) {
436 device_printf(dev, "cannot allocate memory resources.\n");
437 goto fail;
438 }
439
440 /* Set PHY address. */
441 sc->ale_phyaddr = ALE_PHY_ADDR;
442
443 /* Reset PHY. */
444 ale_phy_reset(sc);
445
446 /* Reset the ethernet controller. */
447 ale_reset(sc);
448
449 /* Get PCI and chip id/revision. */
450 sc->ale_rev = pci_get_revid(dev);
451 if (sc->ale_rev >= 0xF0) {
452 /* L2E Rev. B. AR8114 */
453 sc->ale_flags |= ALE_FLAG_FASTETHER;
454 } else {
455 if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) {
456 /* L1E AR8121 */
457 sc->ale_flags |= ALE_FLAG_JUMBO;
458 } else {
459 /* L2E Rev. A. AR8113 */
460 sc->ale_flags |= ALE_FLAG_FASTETHER;
461 }
462 }
463 /*
464 * All known controllers seems to require 4 bytes alignment
465 * of Tx buffers to make Tx checksum offload with custom
466 * checksum generation method work.
467 */
468 sc->ale_flags |= ALE_FLAG_TXCSUM_BUG;
469 /*
470 * All known controllers seems to have issues on Rx checksum
471 * offload for fragmented IP datagrams.
472 */
473 sc->ale_flags |= ALE_FLAG_RXCSUM_BUG;
474 /*
475 * Don't use Tx CMB. It is known to cause RRS update failure
476 * under certain circumstances. Typical phenomenon of the
477 * issue would be unexpected sequence number encountered in
478 * Rx handler.
479 */
480 sc->ale_flags |= ALE_FLAG_TXCMB_BUG;
481 sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >>
482 MASTER_CHIP_REV_SHIFT;
483 if (bootverbose) {
484 device_printf(dev, "PCI device revision : 0x%04x\n",
485 sc->ale_rev);
486 device_printf(dev, "Chip id/revision : 0x%04x\n",
487 sc->ale_chip_rev);
488 }
489 txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN);
490 rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
491 /*
492 * Uninitialized hardware returns an invalid chip id/revision
493 * as well as 0xFFFFFFFF for Tx/Rx fifo length.
494 */
495 if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF ||
496 rxf_len == 0xFFFFFFF) {
497 device_printf(dev,"chip revision : 0x%04x, %u Tx FIFO "
498 "%u Rx FIFO -- not initialized?\n", sc->ale_chip_rev,
499 txf_len, rxf_len);
500 error = ENXIO;
501 goto fail;
502 }
503 device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", txf_len, rxf_len);
504
505 /* Allocate IRQ resources. */
506 msixc = pci_msix_count(dev);
507 msic = pci_msi_count(dev);
508 if (bootverbose) {
509 device_printf(dev, "MSIX count : %d\n", msixc);
510 device_printf(dev, "MSI count : %d\n", msic);
511 }
512
513 /* Prefer MSIX over MSI. */
514 if (msix_disable == 0 || msi_disable == 0) {
515 if (msix_disable == 0 && msixc == ALE_MSIX_MESSAGES &&
516 pci_alloc_msix(dev, &msixc) == 0) {
517 if (msic == ALE_MSIX_MESSAGES) {
518 device_printf(dev, "Using %d MSIX messages.\n",
519 msixc);
520 sc->ale_flags |= ALE_FLAG_MSIX;
521 sc->ale_irq_spec = ale_irq_spec_msix;
522 } else
523 pci_release_msi(dev);
524 }
525 if (msi_disable == 0 && (sc->ale_flags & ALE_FLAG_MSIX) == 0 &&
526 msic == ALE_MSI_MESSAGES &&
527 pci_alloc_msi(dev, &msic) == 0) {
528 if (msic == ALE_MSI_MESSAGES) {
529 device_printf(dev, "Using %d MSI messages.\n",
530 msic);
531 sc->ale_flags |= ALE_FLAG_MSI;
532 sc->ale_irq_spec = ale_irq_spec_msi;
533 } else
534 pci_release_msi(dev);
535 }
536 }
537
538 error = bus_alloc_resources(dev, sc->ale_irq_spec, sc->ale_irq);
539 if (error != 0) {
540 device_printf(dev, "cannot allocate IRQ resources.\n");
541 goto fail;
542 }
543
544 /* Get DMA parameters from PCIe device control register. */
545 if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
546 sc->ale_flags |= ALE_FLAG_PCIE;
547 burst = pci_read_config(dev, i + 0x08, 2);
548 /* Max read request size. */
549 sc->ale_dma_rd_burst = ((burst >> 12) & 0x07) <<
550 DMA_CFG_RD_BURST_SHIFT;
551 /* Max payload size. */
552 sc->ale_dma_wr_burst = ((burst >> 5) & 0x07) <<
553 DMA_CFG_WR_BURST_SHIFT;
554 if (bootverbose) {
555 device_printf(dev, "Read request size : %d bytes.\n",
556 128 << ((burst >> 12) & 0x07));
557 device_printf(dev, "TLP payload size : %d bytes.\n",
558 128 << ((burst >> 5) & 0x07));
559 }
560 } else {
561 sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128;
562 sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128;
563 }
564
565 /* Create device sysctl node. */
566 ale_sysctl_node(sc);
567
568 if ((error = ale_dma_alloc(sc) != 0))
569 goto fail;
570
571 /* Load station address. */
572 ale_get_macaddr(sc);
573
574 ifp = sc->ale_ifp = if_alloc(IFT_ETHER);
575 if (ifp == NULL) {
576 device_printf(dev, "cannot allocate ifnet structure.\n");
577 error = ENXIO;
578 goto fail;
579 }
580
581 ifp->if_softc = sc;
582 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
583 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
584 ifp->if_ioctl = ale_ioctl;
585 ifp->if_start = ale_start;
586 ifp->if_init = ale_init;
587 ifp->if_snd.ifq_drv_maxlen = ALE_TX_RING_CNT - 1;
588 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
589 IFQ_SET_READY(&ifp->if_snd);
590 ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TSO4;
591 ifp->if_hwassist = ALE_CSUM_FEATURES | CSUM_TSO;
592 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
593 sc->ale_flags |= ALE_FLAG_PMCAP;
594 ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
595 }
596 ifp->if_capenable = ifp->if_capabilities;
597
598 /* Set up MII bus. */
599 error = mii_attach(dev, &sc->ale_miibus, ifp, ale_mediachange,
600 ale_mediastatus, BMSR_DEFCAPMASK, sc->ale_phyaddr, MII_OFFSET_ANY,
601 0);
602 if (error != 0) {
603 device_printf(dev, "attaching PHYs failed\n");
604 goto fail;
605 }
606
607 ether_ifattach(ifp, sc->ale_eaddr);
608
609 /* VLAN capability setup. */
610 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
611 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
612 ifp->if_capenable = ifp->if_capabilities;
613 /*
614 * Even though controllers supported by ale(3) have Rx checksum
615 * offload bug the workaround for fragmented frames seemed to
616 * work so far. However it seems Rx checksum offload does not
617 * work under certain conditions. So disable Rx checksum offload
618 * until I find more clue about it but allow users to override it.
619 */
620 ifp->if_capenable &= ~IFCAP_RXCSUM;
621
622 /* Tell the upper layer(s) we support long frames. */
623 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
624
625 /* Create local taskq. */
626 sc->ale_tq = taskqueue_create_fast("ale_taskq", M_WAITOK,
627 taskqueue_thread_enqueue, &sc->ale_tq);
628 if (sc->ale_tq == NULL) {
629 device_printf(dev, "could not create taskqueue.\n");
630 ether_ifdetach(ifp);
631 error = ENXIO;
632 goto fail;
633 }
634 taskqueue_start_threads(&sc->ale_tq, 1, PI_NET, "%s taskq",
635 device_get_nameunit(sc->ale_dev));
636
637 if ((sc->ale_flags & ALE_FLAG_MSIX) != 0)
638 msic = ALE_MSIX_MESSAGES;
639 else if ((sc->ale_flags & ALE_FLAG_MSI) != 0)
640 msic = ALE_MSI_MESSAGES;
641 else
642 msic = 1;
643 for (i = 0; i < msic; i++) {
644 error = bus_setup_intr(dev, sc->ale_irq[i],
645 INTR_TYPE_NET | INTR_MPSAFE, ale_intr, NULL, sc,
646 &sc->ale_intrhand[i]);
647 if (error != 0)
648 break;
649 }
650 if (error != 0) {
651 device_printf(dev, "could not set up interrupt handler.\n");
652 taskqueue_free(sc->ale_tq);
653 sc->ale_tq = NULL;
654 ether_ifdetach(ifp);
655 goto fail;
656 }
657
658 fail:
659 if (error != 0)
660 ale_detach(dev);
661
662 return (error);
663 }
664
/*
 * Detach routine, also used by ale_attach() to unwind a failed attach.
 * Tears down in the reverse order of attach; every step is guarded so
 * it is safe to call with only partial initialization done.
 */
static int
ale_detach(device_t dev)
{
	struct ale_softc *sc;
	struct ifnet *ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->ale_ifp;
	if (device_is_attached(dev)) {
		/* Unhook from the network stack before stopping hardware. */
		ether_ifdetach(ifp);
		ALE_LOCK(sc);
		ale_stop(sc);
		ALE_UNLOCK(sc);
		callout_drain(&sc->ale_tick_ch);
		taskqueue_drain(sc->ale_tq, &sc->ale_int_task);
		taskqueue_drain(taskqueue_swi, &sc->ale_link_task);
	}

	if (sc->ale_tq != NULL) {
		/* Drain again: the interrupt filter may have re-enqueued. */
		taskqueue_drain(sc->ale_tq, &sc->ale_int_task);
		taskqueue_free(sc->ale_tq);
		sc->ale_tq = NULL;
	}

	if (sc->ale_miibus != NULL) {
		device_delete_child(dev, sc->ale_miibus);
		sc->ale_miibus = NULL;
	}
	bus_generic_detach(dev);
	ale_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->ale_ifp = NULL;
	}

	/* Tear down however many interrupt handlers attach installed. */
	if ((sc->ale_flags & ALE_FLAG_MSIX) != 0)
		msic = ALE_MSIX_MESSAGES;
	else if ((sc->ale_flags & ALE_FLAG_MSI) != 0)
		msic = ALE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->ale_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->ale_irq[i],
			    sc->ale_intrhand[i]);
			sc->ale_intrhand[i] = NULL;
		}
	}

	bus_release_resources(dev, sc->ale_irq_spec, sc->ale_irq);
	if ((sc->ale_flags & (ALE_FLAG_MSI | ALE_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->ale_res_spec, sc->ale_res);
	mtx_destroy(&sc->ale_mtx);

	return (0);
}
725
/* Shorthand for registering a read-only 32-bit statistics counter. */
#define	ALE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

/* 64-bit counters: SYSCTL_ADD_QUAD is only available after 8.0. */
#if __FreeBSD_version > 800000
#define	ALE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
#else
#define	ALE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
#endif
736
/*
 * Create the per-device sysctl tree: interrupt-moderation and
 * Rx-process-limit knobs (with device.hints overrides, range-checked),
 * plus the full set of MAC hardware statistics counters under "stats".
 */
static void
ale_sysctl_node(struct ale_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct ale_hw_stats *stats;
	int error;

	stats = &sc->ale_stats;
	ctx = device_get_sysctl_ctx(sc->ale_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ale_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->ale_int_rx_mod, 0,
	    sysctl_hw_ale_int_mod, "I", "ale Rx interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->ale_int_tx_mod, 0,
	    sysctl_hw_ale_int_mod, "I", "ale Tx interrupt moderation");
	/* Pull in device tunables. */
	sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->ale_dev),
	    device_get_unit(sc->ale_dev), "int_rx_mod", &sc->ale_int_rx_mod);
	if (error == 0) {
		/* Clamp hint values back to the legal timer range. */
		if (sc->ale_int_rx_mod < ALE_IM_TIMER_MIN ||
		    sc->ale_int_rx_mod > ALE_IM_TIMER_MAX) {
			device_printf(sc->ale_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    ALE_IM_RX_TIMER_DEFAULT);
			sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
		}
	}
	sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->ale_dev),
	    device_get_unit(sc->ale_dev), "int_tx_mod", &sc->ale_int_tx_mod);
	if (error == 0) {
		if (sc->ale_int_tx_mod < ALE_IM_TIMER_MIN ||
		    sc->ale_int_tx_mod > ALE_IM_TIMER_MAX) {
			device_printf(sc->ale_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    ALE_IM_TX_TIMER_DEFAULT);
			sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
		}
	}
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->ale_process_limit, 0,
	    sysctl_hw_ale_proc_limit, "I",
	    "max number of Rx events to process");
	/* Pull in device tunables. */
	sc->ale_process_limit = ALE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->ale_dev),
	    device_get_unit(sc->ale_dev), "process_limit",
	    &sc->ale_process_limit);
	if (error == 0) {
		if (sc->ale_process_limit < ALE_PROC_MIN ||
		    sc->ale_process_limit > ALE_PROC_MAX) {
			device_printf(sc->ale_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", ALE_PROC_DEFAULT);
			sc->ale_process_limit = ALE_PROC_DEFAULT;
		}
	}

	/* Misc statistics. */
	ALE_SYSCTL_STAT_ADD32(ctx, child, "reset_brk_seq",
	    &stats->reset_brk_seq,
	    "Controller resets due to broken Rx sequnce number");

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "ATE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->rx_control_frames, "Control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatched");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->rx_bcast_bytes, "Good broadcast octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->rx_mcast_bytes, "Good multicast octets");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
	    &stats->rx_rrs_errs, "Return status write-back errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_pkts_filtered,
	    "Frames dropped due to address filtering");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->tx_control_frames, "Control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive derferrals");
	/*
	 * NOTE(review): "defers" exports the same counter
	 * (&stats->tx_excess_defer) as "excess_defers" above -- this
	 * looks like a copy-paste slip; check struct ale_hw_stats in
	 * if_alevar.h for a plain deferral counter before changing.
	 */
	ALE_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_excess_defer, "Frames with derferrals");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->tx_bcast_bytes, "Good broadcast octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->tx_mcast_bytes, "Good multicast octets");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->tx_pkts_1519_max, "1519 to max frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to Excessive collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underrun, "FIFO underruns");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
	    &stats->tx_desc_underrun, "Descriptor write-back errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->tx_lenerrs, "Frames with length mismatched");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
}
920
921 #undef ALE_SYSCTL_STAT_ADD32
922 #undef ALE_SYSCTL_STAT_ADD64
923
/* Callback argument for ale_dmamap_cb(): receives the mapped bus address. */
struct ale_dmamap_arg {
	bus_addr_t	ale_busaddr;
};
927
928 static void
929 ale_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
930 {
931 struct ale_dmamap_arg *ctx;
932
933 if (error != 0)
934 return;
935
936 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
937
938 ctx = (struct ale_dmamap_arg *)arg;
939 ctx->ale_busaddr = segs[0].ds_addr;
940 }
941
942 /*
943 * Tx descriptors/RXF0/CMB DMA blocks share ALE_DESC_ADDR_HI register
944 * which specifies high address region of DMA blocks. Therefore these
945 * blocks should have the same high address of given 4GB address
946 * space(i.e. crossing 4GB boundary is not allowed).
947 */
948 static int
949 ale_check_boundary(struct ale_softc *sc)
950 {
951 bus_addr_t rx_cmb_end[ALE_RX_PAGES], tx_cmb_end;
952 bus_addr_t rx_page_end[ALE_RX_PAGES], tx_ring_end;
953
954 rx_page_end[0] = sc->ale_cdata.ale_rx_page[0].page_paddr +
955 sc->ale_pagesize;
956 rx_page_end[1] = sc->ale_cdata.ale_rx_page[1].page_paddr +
957 sc->ale_pagesize;
958 tx_ring_end = sc->ale_cdata.ale_tx_ring_paddr + ALE_TX_RING_SZ;
959 tx_cmb_end = sc->ale_cdata.ale_tx_cmb_paddr + ALE_TX_CMB_SZ;
960 rx_cmb_end[0] = sc->ale_cdata.ale_rx_page[0].cmb_paddr + ALE_RX_CMB_SZ;
961 rx_cmb_end[1] = sc->ale_cdata.ale_rx_page[1].cmb_paddr + ALE_RX_CMB_SZ;
962
963 if ((ALE_ADDR_HI(tx_ring_end) !=
964 ALE_ADDR_HI(sc->ale_cdata.ale_tx_ring_paddr)) ||
965 (ALE_ADDR_HI(rx_page_end[0]) !=
966 ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[0].page_paddr)) ||
967 (ALE_ADDR_HI(rx_page_end[1]) !=
968 ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[1].page_paddr)) ||
969 (ALE_ADDR_HI(tx_cmb_end) !=
970 ALE_ADDR_HI(sc->ale_cdata.ale_tx_cmb_paddr)) ||
971 (ALE_ADDR_HI(rx_cmb_end[0]) !=
972 ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[0].cmb_paddr)) ||
973 (ALE_ADDR_HI(rx_cmb_end[1]) !=
974 ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[1].cmb_paddr)))
975 return (EFBIG);
976
977 if ((ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_page_end[0])) ||
978 (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_page_end[1])) ||
979 (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_cmb_end[0])) ||
980 (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_cmb_end[1])) ||
981 (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(tx_cmb_end)))
982 return (EFBIG);
983
984 return (0);
985 }
986
/*
 * Allocate every DMA resource the controller needs: parent tag, Tx
 * descriptor ring, Rx pages, Tx/Rx coalescing message blocks (CMBs)
 * and per-packet Tx buffer maps.  If the shared blocks end up spread
 * across a 4GB boundary the whole set is freed and reallocated with
 * DMA addressing restricted to 32 bits (see the "again" retry label).
 *
 * Returns 0 on success or a bus_dma(9) error code.  On failure the
 * caller is expected to release any partial allocations via
 * ale_dma_free().
 */
static int
ale_dma_alloc(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	bus_addr_t lowaddr;
	struct ale_dmamap_arg ctx;
	int error, guard_size, i;

	/* Rx pages carry a guard region sized for one maximum frame. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
		guard_size = ALE_JUMBO_FRAMELEN;
	else
		guard_size = ALE_MAX_FRAMELEN;
	sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ,
	    ALE_RX_PAGE_ALIGN);
	/* First attempt uses the full DMA address space. */
	lowaddr = BUS_SPACE_MAXADDR;
again:
	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->ale_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ale_cdata.ale_parent_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor ring. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_parent_tag, /* parent */
	    ALE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ale_cdata.ale_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx pages. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dma_tag_create(
		    sc->ale_cdata.ale_parent_tag, /* parent */
		    ALE_RX_PAGE_ALIGN, 0,	/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    sc->ale_pagesize,		/* maxsize */
		    1,				/* nsegments */
		    sc->ale_pagesize,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->ale_cdata.ale_rx_page[i].page_tag);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Rx page %d DMA tag.\n", i);
			goto fail;
		}
	}

	/* Create DMA tag for Tx coalescing message block. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_parent_tag, /* parent */
	    ALE_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TX_CMB_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALE_TX_CMB_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ale_cdata.ale_tx_cmb_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create Tx CMB DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx coalescing message block. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dma_tag_create(
		    sc->ale_cdata.ale_parent_tag, /* parent */
		    ALE_CMB_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    ALE_RX_CMB_SZ,		/* maxsize */
		    1,				/* nsegments */
		    ALE_RX_CMB_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->ale_cdata.ale_rx_page[i].cmb_tag);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Rx page %d CMB DMA tag.\n", i);
			goto fail;
		}
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_ring_tag,
	    (void **)&sc->ale_cdata.ale_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ale_cdata.ale_tx_ring_map);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	/* ale_busaddr == 0 after the load means the callback never ran. */
	ctx.ale_busaddr = 0;
	error = bus_dmamap_load(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map, sc->ale_cdata.ale_tx_ring,
	    ALE_TX_RING_SZ, ale_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ale_busaddr == 0) {
		device_printf(sc->ale_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->ale_cdata.ale_tx_ring_paddr = ctx.ale_busaddr;

	/* Rx pages. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].page_tag,
		    (void **)&sc->ale_cdata.ale_rx_page[i].page_addr,
		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &sc->ale_cdata.ale_rx_page[i].page_map);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not allocate DMA'able memory for "
			    "Rx page %d.\n", i);
			goto fail;
		}
		ctx.ale_busaddr = 0;
		error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].page_tag,
		    sc->ale_cdata.ale_rx_page[i].page_map,
		    sc->ale_cdata.ale_rx_page[i].page_addr,
		    sc->ale_pagesize, ale_dmamap_cb, &ctx, 0);
		if (error != 0 || ctx.ale_busaddr == 0) {
			device_printf(sc->ale_dev,
			    "could not load DMA'able memory for "
			    "Rx page %d.\n", i);
			goto fail;
		}
		sc->ale_cdata.ale_rx_page[i].page_paddr = ctx.ale_busaddr;
	}

	/* Tx CMB. */
	error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_cmb_tag,
	    (void **)&sc->ale_cdata.ale_tx_cmb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ale_cdata.ale_tx_cmb_map);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not allocate DMA'able memory for Tx CMB.\n");
		goto fail;
	}
	ctx.ale_busaddr = 0;
	error = bus_dmamap_load(sc->ale_cdata.ale_tx_cmb_tag,
	    sc->ale_cdata.ale_tx_cmb_map, sc->ale_cdata.ale_tx_cmb,
	    ALE_TX_CMB_SZ, ale_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ale_busaddr == 0) {
		device_printf(sc->ale_dev,
		    "could not load DMA'able memory for Tx CMB.\n");
		goto fail;
	}
	sc->ale_cdata.ale_tx_cmb_paddr = ctx.ale_busaddr;

	/* Rx CMB. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].cmb_tag,
		    (void **)&sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &sc->ale_cdata.ale_rx_page[i].cmb_map);
		if (error != 0) {
			device_printf(sc->ale_dev, "could not allocate "
			    "DMA'able memory for Rx page %d CMB.\n", i);
			goto fail;
		}
		ctx.ale_busaddr = 0;
		error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].cmb_tag,
		    sc->ale_cdata.ale_rx_page[i].cmb_map,
		    sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    ALE_RX_CMB_SZ, ale_dmamap_cb, &ctx, 0);
		if (error != 0 || ctx.ale_busaddr == 0) {
			device_printf(sc->ale_dev, "could not load DMA'able "
			    "memory for Rx page %d CMB.\n", i);
			goto fail;
		}
		sc->ale_cdata.ale_rx_page[i].cmb_paddr = ctx.ale_busaddr;
	}

	/*
	 * Tx descriptors/RXF0/CMB DMA blocks share the same
	 * high address region of 64bit DMA address space.
	 */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = ale_check_boundary(sc)) != 0) {
		device_printf(sc->ale_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		ale_dma_free(sc);
		/*
		 * Limit max allowable DMA address space to 32bit
		 * and try again.
		 */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create Tx buffer parent tag.
	 * AR81xx allows 64bit DMA addressing of Tx buffers so it
	 * needs separate parent DMA tag as parent DMA address space
	 * could be restricted to be within 32bit address space by
	 * 4GB boundary crossing.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->ale_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ale_cdata.ale_buffer_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TSO_MAXSIZE,		/* maxsize */
	    ALE_MAXTXSEGS,		/* nsegments */
	    ALE_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ale_cdata.ale_tx_tag);
	if (error != 0) {
		device_printf(sc->ale_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->ale_cdata.ale_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
1271
/*
 * Release all DMA resources allocated by ale_dma_alloc().  Safe to
 * call on a partially initialized softc: every step is guarded by a
 * NULL check and each pointer is cleared after release, so the
 * function is effectively idempotent.  For each block the order is
 * unload map -> free memory -> destroy tag, as required by bus_dma(9).
 */
static void
ale_dma_free(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	/* Tx buffers. */
	if (sc->ale_cdata.ale_tx_tag != NULL) {
		for (i = 0; i < ALE_TX_RING_CNT; i++) {
			txd = &sc->ale_cdata.ale_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->ale_cdata.ale_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_tag);
		sc->ale_cdata.ale_tx_tag = NULL;
	}
	/* Tx descriptor ring. */
	if (sc->ale_cdata.ale_tx_ring_tag != NULL) {
		if (sc->ale_cdata.ale_tx_ring_map != NULL)
			bus_dmamap_unload(sc->ale_cdata.ale_tx_ring_tag,
			    sc->ale_cdata.ale_tx_ring_map);
		if (sc->ale_cdata.ale_tx_ring_map != NULL &&
		    sc->ale_cdata.ale_tx_ring != NULL)
			bus_dmamem_free(sc->ale_cdata.ale_tx_ring_tag,
			    sc->ale_cdata.ale_tx_ring,
			    sc->ale_cdata.ale_tx_ring_map);
		sc->ale_cdata.ale_tx_ring = NULL;
		sc->ale_cdata.ale_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_ring_tag);
		sc->ale_cdata.ale_tx_ring_tag = NULL;
	}
	/* Rx page block. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].page_tag != NULL) {
			if (sc->ale_cdata.ale_rx_page[i].page_map != NULL)
				bus_dmamap_unload(
				    sc->ale_cdata.ale_rx_page[i].page_tag,
				    sc->ale_cdata.ale_rx_page[i].page_map);
			if (sc->ale_cdata.ale_rx_page[i].page_map != NULL &&
			    sc->ale_cdata.ale_rx_page[i].page_addr != NULL)
				bus_dmamem_free(
				    sc->ale_cdata.ale_rx_page[i].page_tag,
				    sc->ale_cdata.ale_rx_page[i].page_addr,
				    sc->ale_cdata.ale_rx_page[i].page_map);
			sc->ale_cdata.ale_rx_page[i].page_addr = NULL;
			sc->ale_cdata.ale_rx_page[i].page_map = NULL;
			bus_dma_tag_destroy(
			    sc->ale_cdata.ale_rx_page[i].page_tag);
			sc->ale_cdata.ale_rx_page[i].page_tag = NULL;
		}
	}
	/* Rx CMB. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].cmb_tag != NULL) {
			if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL)
				bus_dmamap_unload(
				    sc->ale_cdata.ale_rx_page[i].cmb_tag,
				    sc->ale_cdata.ale_rx_page[i].cmb_map);
			if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL &&
			    sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL)
				bus_dmamem_free(
				    sc->ale_cdata.ale_rx_page[i].cmb_tag,
				    sc->ale_cdata.ale_rx_page[i].cmb_addr,
				    sc->ale_cdata.ale_rx_page[i].cmb_map);
			sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL;
			sc->ale_cdata.ale_rx_page[i].cmb_map = NULL;
			bus_dma_tag_destroy(
			    sc->ale_cdata.ale_rx_page[i].cmb_tag);
			sc->ale_cdata.ale_rx_page[i].cmb_tag = NULL;
		}
	}
	/* Tx CMB. */
	if (sc->ale_cdata.ale_tx_cmb_tag != NULL) {
		if (sc->ale_cdata.ale_tx_cmb_map != NULL)
			bus_dmamap_unload(sc->ale_cdata.ale_tx_cmb_tag,
			    sc->ale_cdata.ale_tx_cmb_map);
		if (sc->ale_cdata.ale_tx_cmb_map != NULL &&
		    sc->ale_cdata.ale_tx_cmb != NULL)
			bus_dmamem_free(sc->ale_cdata.ale_tx_cmb_tag,
			    sc->ale_cdata.ale_tx_cmb,
			    sc->ale_cdata.ale_tx_cmb_map);
		sc->ale_cdata.ale_tx_cmb = NULL;
		sc->ale_cdata.ale_tx_cmb_map = NULL;
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_cmb_tag);
		sc->ale_cdata.ale_tx_cmb_tag = NULL;
	}
	/* Finally drop the two parent tags. */
	if (sc->ale_cdata.ale_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->ale_cdata.ale_buffer_tag);
		sc->ale_cdata.ale_buffer_tag = NULL;
	}
	if (sc->ale_cdata.ale_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->ale_cdata.ale_parent_tag);
		sc->ale_cdata.ale_parent_tag = NULL;
	}
}
1370
1371 static int
1372 ale_shutdown(device_t dev)
1373 {
1374
1375 return (ale_suspend(dev));
1376 }
1377
1378 /*
1379 * Note, this driver resets the link speed to 10/100Mbps by
1380 * restarting auto-negotiation in suspend/shutdown phase but we
1381 * don't know whether that auto-negotiation would succeed or not
1382 * as driver has no control after powering off/suspend operation.
1383 * If the renegotiation fail WOL may not work. Running at 1Gbps
1384 * will draw more power than 375mA at 3.3V which is specified in
1385 * PCI specification and that would result in complete
1386 * shutdowning power to ethernet controller.
1387 *
1388 * TODO
1389 * Save current negotiated media speed/duplex/flow-control to
1390 * softc and restore the same link again after resuming. PHY
1391 * handling such as power down/resetting to 100Mbps may be better
1392 * handled in suspend method in phy driver.
1393 */
1394 static void
1395 ale_setlinkspeed(struct ale_softc *sc)
1396 {
1397 struct mii_data *mii;
1398 int aneg, i;
1399
1400 mii = device_get_softc(sc->ale_miibus);
1401 mii_pollstat(mii);
1402 aneg = 0;
1403 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1404 (IFM_ACTIVE | IFM_AVALID)) {
1405 switch IFM_SUBTYPE(mii->mii_media_active) {
1406 case IFM_10_T:
1407 case IFM_100_TX:
1408 return;
1409 case IFM_1000_T:
1410 aneg++;
1411 break;
1412 default:
1413 break;
1414 }
1415 }
1416 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, MII_100T2CR, 0);
1417 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
1418 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1419 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
1420 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
1421 DELAY(1000);
1422 if (aneg != 0) {
1423 /*
1424 * Poll link state until ale(4) get a 10/100Mbps link.
1425 */
1426 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1427 mii_pollstat(mii);
1428 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
1429 == (IFM_ACTIVE | IFM_AVALID)) {
1430 switch (IFM_SUBTYPE(
1431 mii->mii_media_active)) {
1432 case IFM_10_T:
1433 case IFM_100_TX:
1434 ale_mac_config(sc);
1435 return;
1436 default:
1437 break;
1438 }
1439 }
1440 ALE_UNLOCK(sc);
1441 pause("alelnk", hz);
1442 ALE_LOCK(sc);
1443 }
1444 if (i == MII_ANEGTICKS_GIGE)
1445 device_printf(sc->ale_dev,
1446 "establishing a link failed, WOL may not work!");
1447 }
1448 /*
1449 * No link, force MAC to have 100Mbps, full-duplex link.
1450 * This is the last resort and may/may not work.
1451 */
1452 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1453 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1454 ale_mac_config(sc);
1455 }
1456
/*
 * Configure wake-on-LAN for suspend/shutdown.  When the device has no
 * PCI power-management capability, WOL is disabled and the PHY is
 * forced down.  Otherwise the WOL/MAC registers are programmed from
 * the interface's WOL capabilities and PME is requested through the
 * PCI power-status register.  Caller must hold the driver lock.
 */
static void
ale_setwol(struct ale_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg, pmcs;
	uint16_t pmstat;
	int pmc;

	ALE_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->ale_dev, PCIY_PMG, &pmc) != 0) {
		/* No PM capability: disable WOL. */
		CSR_WRITE_4(sc, ALE_WOL_CFG, 0);
		reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg);
		/* Force PHY power down. */
		CSR_WRITE_2(sc, ALE_GPHY_CTRL,
		    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN |
		    GPHY_CTRL_HIB_PULSE | GPHY_CTRL_PHY_PLL_ON |
		    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_IDDQ |
		    GPHY_CTRL_PCLK_SEL_DIS | GPHY_CTRL_PWDOWN_HW);
		return;
	}

	ifp = sc->ale_ifp;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/* Drop a gigabit link to 10/100 to bound power draw. */
		if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
			ale_setlinkspeed(sc);
	}

	/* Program magic-packet wakeup if enabled. */
	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, ALE_WOL_CFG, pmcs);
	/* Restrict Rx while suspended to what WOL needs. */
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
	    MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		reg |= MAC_CFG_RX_ENB;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);

	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* WOL disabled, PHY power down. */
		reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg);
		CSR_WRITE_2(sc, ALE_GPHY_CTRL,
		    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN |
		    GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
		    GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PCLK_SEL_DIS |
		    GPHY_CTRL_PWDOWN_HW);
	}
	/* Request PME. */
	pmstat = pci_read_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
1519
1520 static int
1521 ale_suspend(device_t dev)
1522 {
1523 struct ale_softc *sc;
1524
1525 sc = device_get_softc(dev);
1526
1527 ALE_LOCK(sc);
1528 ale_stop(sc);
1529 ale_setwol(sc);
1530 ALE_UNLOCK(sc);
1531
1532 return (0);
1533 }
1534
1535 static int
1536 ale_resume(device_t dev)
1537 {
1538 struct ale_softc *sc;
1539 struct ifnet *ifp;
1540 int pmc;
1541 uint16_t pmstat;
1542
1543 sc = device_get_softc(dev);
1544
1545 ALE_LOCK(sc);
1546 if (pci_find_extcap(sc->ale_dev, PCIY_PMG, &pmc) == 0) {
1547 /* Disable PME and clear PME status. */
1548 pmstat = pci_read_config(sc->ale_dev,
1549 pmc + PCIR_POWER_STATUS, 2);
1550 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1551 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1552 pci_write_config(sc->ale_dev,
1553 pmc + PCIR_POWER_STATUS, pmstat, 2);
1554 }
1555 }
1556 /* Reset PHY. */
1557 ale_phy_reset(sc);
1558 ifp = sc->ale_ifp;
1559 if ((ifp->if_flags & IFF_UP) != 0) {
1560 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1561 ale_init_locked(sc);
1562 }
1563 ALE_UNLOCK(sc);
1564
1565 return (0);
1566 }
1567
/*
 * Encapsulate an mbuf chain into the Tx descriptor ring.
 *
 * On success returns 0 with *m_head loaded into DMA descriptors; the
 * mbuf may have been replaced (defragmented/duplicated), so callers
 * must use the updated *m_head.  On failure *m_head may be freed and
 * set to NULL (unrecoverable) or left intact (ring full, ENOBUFS) so
 * the caller can requeue it.  Caller must hold the driver lock.
 */
static int
ale_encap(struct ale_softc *sc, struct mbuf **m_head)
{
	struct ale_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	bus_dma_segment_t txsegs[ALE_MAXTXSEGS];
	bus_dmamap_t map;
	uint32_t cflags, hdrlen, ip_off, poff, vtag;
	int error, i, nsegs, prod, si;

	ALE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	ip = NULL;
	tcp = NULL;
	cflags = vtag = 0;
	ip_off = poff = 0;
	if ((m->m_pkthdr.csum_flags & (ALE_CSUM_FEATURES | CSUM_TSO)) != 0) {
		/*
		 * AR81xx requires offset of TCP/UDP payload in its Tx
		 * descriptor to perform hardware Tx checksum offload.
		 * Additionally, TSO requires IP/TCP header size and
		 * modification of IP/TCP header in order to make TSO
		 * engine work. This kind of operation takes many CPU
		 * cycles on FreeBSD so fast host CPU is required to
		 * get smooth TSO performance.
		 */
		struct ether_header *eh;

		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			/* Release original mbufs. */
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		/*
		 * Buggy-controller requires 4 byte aligned Tx buffer
		 * to make custom checksum offload work.
		 */
		if ((sc->ale_flags & ALE_FLAG_TXCSUM_BUG) != 0 &&
		    (m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0 &&
		    (mtod(m, intptr_t) & 3) != 0) {
			/* m_defrag(9) linearizes into aligned clusters. */
			m = m_defrag(*m_head, M_DONTWAIT);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		/* Make the ethernet header contiguous. */
		ip_off = sizeof(struct ether_header);
		m = m_pullup(m, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/*
		 * Check if hardware VLAN insertion is off.
		 * Additional check for LLC/SNAP frame?
		 */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			/* Software-tagged frame: IP starts after the tag. */
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		/* poff = start of the TCP/UDP payload header. */
		poff = ip_off + (ip->ip_hl << 2);
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			/*
			 * XXX
			 * AR81xx requires the first descriptor should
			 * not include any TCP playload for TSO case.
			 * (i.e. ethernet header + IP + TCP header only)
			 * m_pullup(9) above will ensure this too.
			 * However it's not correct if the first mbuf
			 * of the chain does not use cluster.
			 */
			m = m_pullup(m, poff + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			/* m_pullup(9) may relocate data; re-derive pointers. */
			ip = (struct ip *)(mtod(m, char *) + ip_off);
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			m = m_pullup(m, poff + (tcp->th_off << 2));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			/*
			 * AR81xx requires IP/TCP header size and offset as
			 * well as TCP pseudo checksum which complicates
			 * TSO configuration. I guess this comes from the
			 * adherence to Microsoft NDIS Large Send
			 * specification which requires insertion of
			 * pseudo checksum by upper stack. The pseudo
			 * checksum that NDIS refers to doesn't include
			 * TCP payload length so ale(4) should recompute
			 * the pseudo checksum here. Hopefully this wouldn't
			 * be much burden on modern CPUs.
			 * Reset IP checksum and recompute TCP pseudo
			 * checksum as NDIS specification said.
			 */
			ip->ip_sum = 0;
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
		*m_head = m;
	}

	/* Remember the starting slot so TSO_HDR can be set on it later. */
	si = prod = sc->ale_cdata.ale_tx_prod;
	txd = &sc->ale_cdata.ale_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_sg(sc->ale_cdata.ale_tx_tag, map,
	    *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many segments: collapse the chain and retry once. */
		m = m_collapse(*m_head, M_DONTWAIT, ALE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->ale_cdata.ale_tx_tag, map,
		    *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->ale_cdata.ale_tx_cnt + nsegs >= ALE_TX_RING_CNT - 3) {
		bus_dmamap_unload(sc->ale_cdata.ale_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->ale_cdata.ale_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/* Request TSO and set MSS. */
		cflags |= ALE_TD_TSO;
		cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << ALE_TD_MSS_SHIFT);
		/* Set IP/TCP header size. */
		cflags |= ip->ip_hl << ALE_TD_IPHDR_LEN_SHIFT;
		cflags |= tcp->th_off << ALE_TD_TCPHDR_LEN_SHIFT;
	} else if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) {
		/*
		 * AR81xx supports Tx custom checksum offload feature
		 * that offloads single 16bit checksum computation.
		 * So you can choose one among IP, TCP and UDP.
		 * Normally driver sets checksum start/insertion
		 * position from the information of TCP/UDP frame as
		 * TCP/UDP checksum takes more time than that of IP.
		 * However it seems that custom checksum offload
		 * requires 4 bytes aligned Tx buffers due to hardware
		 * bug.
		 * AR81xx also supports explicit Tx checksum computation
		 * if it is told that the size of IP header and TCP
		 * header(for UDP, the header size does not matter
		 * because it's fixed length). However with this scheme
		 * TSO does not work so you have to choose one either
		 * TSO or explicit Tx checksum offload. I chosen TSO
		 * plus custom checksum offload with work-around which
		 * will cover most common usage for this consumer
		 * ethernet controller. The work-around takes a lot of
		 * CPU cycles if Tx buffer is not aligned on 4 bytes
		 * boundary, though.
		 */
		cflags |= ALE_TD_CXSUM;
		/* Set checksum start offset. */
		cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT);
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= ((poff + m->m_pkthdr.csum_data) <<
		    ALE_TD_CSUM_XSUMOFFSET_SHIFT);
	}

	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = ALE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
		vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK);
		cflags |= ALE_TD_INSERT_VLAN_TAG;
	}

	i = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * Make sure the first fragment contains
		 * only ethernet and IP/TCP header with options.
		 */
		hdrlen = poff + (tcp->th_off << 2);
		desc = &sc->ale_cdata.ale_tx_ring[prod];
		desc->addr = htole64(txsegs[i].ds_addr);
		desc->len = htole32(ALE_TX_BYTES(hdrlen) | vtag);
		desc->flags = htole32(cflags);
		sc->ale_cdata.ale_tx_cnt++;
		ALE_DESC_INC(prod, ALE_TX_RING_CNT);
		if (m->m_len - hdrlen > 0) {
			/* Handle remaining payload of the first fragment. */
			desc = &sc->ale_cdata.ale_tx_ring[prod];
			desc->addr = htole64(txsegs[i].ds_addr + hdrlen);
			desc->len = htole32(ALE_TX_BYTES(m->m_len - hdrlen) |
			    vtag);
			desc->flags = htole32(cflags);
			sc->ale_cdata.ale_tx_cnt++;
			ALE_DESC_INC(prod, ALE_TX_RING_CNT);
		}
		i = 1;
	}
	/* One descriptor per remaining DMA segment. */
	for (; i < nsegs; i++) {
		desc = &sc->ale_cdata.ale_tx_ring[prod];
		desc->addr = htole64(txsegs[i].ds_addr);
		desc->len = htole32(ALE_TX_BYTES(txsegs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->ale_cdata.ale_tx_cnt++;
		ALE_DESC_INC(prod, ALE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->ale_cdata.ale_tx_prod = prod;
	/* Set TSO header on the first descriptor. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		desc = &sc->ale_cdata.ale_tx_ring[si];
		desc->flags |= htole32(ALE_TD_TSO_HDR);
	}

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT;
	desc = &sc->ale_cdata.ale_tx_ring[prod];
	desc->flags |= htole32(ALE_TD_EOP);

	/* Swap dmamap of the first and the last. */
	txd = &sc->ale_cdata.ale_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1841
1842 static void
1843 ale_start(struct ifnet *ifp)
1844 {
1845 struct ale_softc *sc;
1846
1847 sc = ifp->if_softc;
1848 ALE_LOCK(sc);
1849 ale_start_locked(ifp);
1850 ALE_UNLOCK(sc);
1851 }
1852
1853 static void
1854 ale_start_locked(struct ifnet *ifp)
1855 {
1856 struct ale_softc *sc;
1857 struct mbuf *m_head;
1858 int enq;
1859
1860 sc = ifp->if_softc;
1861
1862 ALE_LOCK_ASSERT(sc);
1863
1864 /* Reclaim transmitted frames. */
1865 if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT)
1866 ale_txeof(sc);
1867
1868 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1869 IFF_DRV_RUNNING || (sc->ale_flags & ALE_FLAG_LINK) == 0)
1870 return;
1871
1872 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1873 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1874 if (m_head == NULL)
1875 break;
1876 /*
1877 * Pack the data into the transmit ring. If we
1878 * don't have room, set the OACTIVE flag and wait
1879 * for the NIC to drain the ring.
1880 */
1881 if (ale_encap(sc, &m_head)) {
1882 if (m_head == NULL)
1883 break;
1884 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1885 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1886 break;
1887 }
1888
1889 enq++;
1890 /*
1891 * If there's a BPF listener, bounce a copy of this frame
1892 * to him.
1893 */
1894 ETHER_BPF_MTAP(ifp, m_head);
1895 }
1896
1897 if (enq > 0) {
1898 /* Kick. */
1899 CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX,
1900 sc->ale_cdata.ale_tx_prod);
1901 /* Set a timeout in case the chip goes out to lunch. */
1902 sc->ale_watchdog_timer = ALE_TX_TIMEOUT;
1903 }
1904 }
1905
1906 static void
1907 ale_watchdog(struct ale_softc *sc)
1908 {
1909 struct ifnet *ifp;
1910
1911 ALE_LOCK_ASSERT(sc);
1912
1913 if (sc->ale_watchdog_timer == 0 || --sc->ale_watchdog_timer)
1914 return;
1915
1916 ifp = sc->ale_ifp;
1917 if ((sc->ale_flags & ALE_FLAG_LINK) == 0) {
1918 if_printf(sc->ale_ifp, "watchdog timeout (lost link)\n");
1919 ifp->if_oerrors++;
1920 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1921 ale_init_locked(sc);
1922 return;
1923 }
1924 if_printf(sc->ale_ifp, "watchdog timeout -- resetting\n");
1925 ifp->if_oerrors++;
1926 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1927 ale_init_locked(sc);
1928 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1929 ale_start_locked(ifp);
1930 }
1931
/*
 * Ifnet ioctl handler.
 *
 * Handles MTU changes, interface flag updates, multicast list changes,
 * media selection and capability toggles; everything else is handed to
 * ether_ioctl().  Returns 0 on success or an errno value (EINVAL for an
 * out-of-range MTU, or whatever ifmedia_ioctl()/ether_ioctl() report).
 */
static int
ale_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ale_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTU is valid only on controllers with jumbo support. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALE_JUMBO_MTU ||
		    ((sc->ale_flags & ALE_FLAG_JUMBO) == 0 &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ALE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			/* Reinitialize so the new frame size takes effect. */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ale_init_locked(sc);
			}
			ALE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		ALE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/*
				 * Only PROMISC/ALLMULTI transitions require
				 * reprogramming the Rx filter.
				 */
				if (((ifp->if_flags ^ sc->ale_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ale_rxfilter(sc);
			} else {
				ale_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ale_stop(sc);
		}
		/* Remember flags so the next call can compute the delta. */
		sc->ale_if_flags = ifp->if_flags;
		ALE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ALE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ale_rxfilter(sc);
		ALE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media handling is delegated to the MII layer. */
		mii = device_get_softc(sc->ale_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		ALE_LOCK(sc);
		/* mask holds the capability bits the caller wants flipped. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			/* Keep if_hwassist in sync with the enabled caps. */
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= ALE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ALE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			/* HWTSO over VLAN requires hardware VLAN tagging. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
			ale_rxvlan(sc);
		}
		ALE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
2040
2041 static void
2042 ale_mac_config(struct ale_softc *sc)
2043 {
2044 struct mii_data *mii;
2045 uint32_t reg;
2046
2047 ALE_LOCK_ASSERT(sc);
2048
2049 mii = device_get_softc(sc->ale_miibus);
2050 reg = CSR_READ_4(sc, ALE_MAC_CFG);
2051 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2052 MAC_CFG_SPEED_MASK);
2053 /* Reprogram MAC with resolved speed/duplex. */
2054 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2055 case IFM_10_T:
2056 case IFM_100_TX:
2057 reg |= MAC_CFG_SPEED_10_100;
2058 break;
2059 case IFM_1000_T:
2060 reg |= MAC_CFG_SPEED_1000;
2061 break;
2062 }
2063 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2064 reg |= MAC_CFG_FULL_DUPLEX;
2065 #ifdef notyet
2066 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2067 reg |= MAC_CFG_TX_FC;
2068 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2069 reg |= MAC_CFG_RX_FC;
2070 #endif
2071 }
2072 CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
2073 }
2074
2075 static void
2076 ale_link_task(void *arg, int pending)
2077 {
2078 struct ale_softc *sc;
2079 struct mii_data *mii;
2080 struct ifnet *ifp;
2081 uint32_t reg;
2082
2083 sc = (struct ale_softc *)arg;
2084
2085 ALE_LOCK(sc);
2086 mii = device_get_softc(sc->ale_miibus);
2087 ifp = sc->ale_ifp;
2088 if (mii == NULL || ifp == NULL ||
2089 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2090 ALE_UNLOCK(sc);
2091 return;
2092 }
2093
2094 sc->ale_flags &= ~ALE_FLAG_LINK;
2095 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2096 (IFM_ACTIVE | IFM_AVALID)) {
2097 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2098 case IFM_10_T:
2099 case IFM_100_TX:
2100 sc->ale_flags |= ALE_FLAG_LINK;
2101 break;
2102 case IFM_1000_T:
2103 if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
2104 sc->ale_flags |= ALE_FLAG_LINK;
2105 break;
2106 default:
2107 break;
2108 }
2109 }
2110
2111 /* Stop Rx/Tx MACs. */
2112 ale_stop_mac(sc);
2113
2114 /* Program MACs with resolved speed/duplex/flow-control. */
2115 if ((sc->ale_flags & ALE_FLAG_LINK) != 0) {
2116 ale_mac_config(sc);
2117 /* Reenable Tx/Rx MACs. */
2118 reg = CSR_READ_4(sc, ALE_MAC_CFG);
2119 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
2120 CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
2121 }
2122
2123 ALE_UNLOCK(sc);
2124 }
2125
2126 static void
2127 ale_stats_clear(struct ale_softc *sc)
2128 {
2129 struct smb sb;
2130 uint32_t *reg;
2131 int i;
2132
2133 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
2134 CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
2135 i += sizeof(uint32_t);
2136 }
2137 /* Read Tx statistics. */
2138 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
2139 CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
2140 i += sizeof(uint32_t);
2141 }
2142 }
2143
/*
 * Harvest the hardware MIB counters into the softc's accumulated
 * statistics and update the ifnet counters.  The MIB registers are
 * read into a struct smb image first (the struct layout mirrors the
 * register file), then folded into the running totals.  Called with
 * the driver lock held, once per second from ale_tick() and once
 * more from ale_stop().
 */
static void
ale_stats_update(struct ale_softc *sc)
{
	struct ale_hw_stats *stat;
	struct smb sb, *smb;
	struct ifnet *ifp;
	uint32_t *reg;
	int i;

	ALE_LOCK_ASSERT(sc);

	ifp = sc->ale_ifp;
	stat = &sc->ale_stats;
	smb = &sb;

	/* Read Rx statistics. */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		*reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Read Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		*reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_abort += smb->tx_abort;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	ifp->if_opackets += smb->tx_frames;

	/* Aborted frames are weighted by the configured retry limit. */
	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;

	/*
	 * XXX
	 * tx_pkts_truncated counter looks suspicious. It constantly
	 * increments with no sign of Tx errors. This may indicate
	 * the counter name is not correct one so I've removed the
	 * counter in output errors.
	 */
	ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
	    smb->tx_underrun;

	ifp->if_ipackets += smb->rx_frames;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs;
}
2248
2249 static int
2250 ale_intr(void *arg)
2251 {
2252 struct ale_softc *sc;
2253 uint32_t status;
2254
2255 sc = (struct ale_softc *)arg;
2256
2257 status = CSR_READ_4(sc, ALE_INTR_STATUS);
2258 if ((status & ALE_INTRS) == 0)
2259 return (FILTER_STRAY);
2260 /* Disable interrupts. */
2261 CSR_WRITE_4(sc, ALE_INTR_STATUS, INTR_DIS_INT);
2262 taskqueue_enqueue(sc->ale_tq, &sc->ale_int_task);
2263
2264 return (FILTER_HANDLED);
2265 }
2266
/*
 * Deferred interrupt handler, run from the driver taskqueue after
 * ale_intr() masked the interrupt.  Processes received frames (up to
 * ale_process_limit per pass), restarts transmission, and resets the
 * controller on fatal DMA or Rx-sequence errors.  Re-queues itself
 * when more Rx work is pending; otherwise re-enables interrupts.
 */
static void
ale_int_task(void *arg, int pending)
{
	struct ale_softc *sc;
	struct ifnet *ifp;
	uint32_t status;
	int more;

	sc = (struct ale_softc *)arg;

	status = CSR_READ_4(sc, ALE_INTR_STATUS);
	ALE_LOCK(sc);
	/* A previous pass hit the process limit; pretend Rx is pending. */
	if (sc->ale_morework != 0)
		status |= INTR_RX_PKT;
	if ((status & ALE_INTRS) == 0)
		goto done;

	/* Acknowledge interrupts but still disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT);

	ifp = sc->ale_ifp;
	more = 0;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		more = ale_rxeof(sc, sc->ale_process_limit);
		if (more == EAGAIN)
			sc->ale_morework = 1;
		else if (more == EIO) {
			/* Rx sequence number mismatch: full reinit. */
			sc->ale_stats.reset_brk_seq++;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ale_init_locked(sc);
			ALE_UNLOCK(sc);
			return;
		}

		if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
			if ((status & INTR_DMA_RD_TO_RST) != 0)
				device_printf(sc->ale_dev,
				    "DMA read error! -- resetting\n");
			if ((status & INTR_DMA_WR_TO_RST) != 0)
				device_printf(sc->ale_dev,
				    "DMA write error! -- resetting\n");
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ale_init_locked(sc);
			/* Interrupts stay disabled; ale_init_locked rearms. */
			ALE_UNLOCK(sc);
			return;
		}
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ale_start_locked(ifp);
	}

	/* More work pending: re-queue ourselves without enabling interrupts. */
	if (more == EAGAIN ||
	    (CSR_READ_4(sc, ALE_INTR_STATUS) & ALE_INTRS) != 0) {
		ALE_UNLOCK(sc);
		taskqueue_enqueue(sc->ale_tq, &sc->ale_int_task);
		return;
	}

done:
	ALE_UNLOCK(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF);
}
2330
2331 static void
2332 ale_txeof(struct ale_softc *sc)
2333 {
2334 struct ifnet *ifp;
2335 struct ale_txdesc *txd;
2336 uint32_t cons, prod;
2337 int prog;
2338
2339 ALE_LOCK_ASSERT(sc);
2340
2341 ifp = sc->ale_ifp;
2342
2343 if (sc->ale_cdata.ale_tx_cnt == 0)
2344 return;
2345
2346 bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
2347 sc->ale_cdata.ale_tx_ring_map,
2348 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2349 if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) {
2350 bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag,
2351 sc->ale_cdata.ale_tx_cmb_map,
2352 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2353 prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK;
2354 } else
2355 prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX);
2356 cons = sc->ale_cdata.ale_tx_cons;
2357 /*
2358 * Go through our Tx list and free mbufs for those
2359 * frames which have been transmitted.
2360 */
2361 for (prog = 0; cons != prod; prog++,
2362 ALE_DESC_INC(cons, ALE_TX_RING_CNT)) {
2363 if (sc->ale_cdata.ale_tx_cnt <= 0)
2364 break;
2365 prog++;
2366 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2367 sc->ale_cdata.ale_tx_cnt--;
2368 txd = &sc->ale_cdata.ale_txdesc[cons];
2369 if (txd->tx_m != NULL) {
2370 /* Reclaim transmitted mbufs. */
2371 bus_dmamap_sync(sc->ale_cdata.ale_tx_tag,
2372 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2373 bus_dmamap_unload(sc->ale_cdata.ale_tx_tag,
2374 txd->tx_dmamap);
2375 m_freem(txd->tx_m);
2376 txd->tx_m = NULL;
2377 }
2378 }
2379
2380 if (prog > 0) {
2381 sc->ale_cdata.ale_tx_cons = cons;
2382 /*
2383 * Unarm watchdog timer only when there is no pending
2384 * Tx descriptors in queue.
2385 */
2386 if (sc->ale_cdata.ale_tx_cnt == 0)
2387 sc->ale_watchdog_timer = 0;
2388 }
2389 }
2390
/*
 * Advance the Rx page consumer past one received frame (header plus
 * payload, rounded up to the page alignment).  When the consumer runs
 * off the end of the current page, hand the page back to the hardware
 * and flip to the alternate page, refreshing the caller's page pointer
 * and cached producer index.
 *
 * page: in/out pointer to the active Rx page; updated on page flip.
 * length: payload length of the frame just consumed.
 * prod: in/out cached producer index; re-read from the CMB on flip.
 */
static void
ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page,
    uint32_t length, uint32_t *prod)
{
	struct ale_rx_page *rx_page;

	rx_page = *page;
	/* Update consumer position. */
	rx_page->cons += roundup(length + sizeof(struct rx_rs),
	    ALE_RX_PAGE_ALIGN);
	if (rx_page->cons >= ALE_RX_PAGE_SZ) {
		/*
		 * End of Rx page reached, let hardware reuse
		 * this page.
		 */
		rx_page->cons = 0;
		/* Clear the CMB producer before the page is re-armed. */
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp,
		    RXF_VALID);
		/* Switch to alternate Rx page. */
		sc->ale_cdata.ale_rx_curp ^= 1;
		rx_page = *page =
		    &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
		/* Page flipped, sync CMB and Rx page. */
		bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Sync completed, cache updated producer index. */
		*prod = *rx_page->cmb_addr;
	}
}
2425
2426
2427 /*
2428 * It seems that AR81xx controller can compute partial checksum.
2429 * The partial checksum value can be used to accelerate checksum
2430 * computation for fragmented TCP/UDP packets. Upper network stack
2431 * already takes advantage of the partial checksum value in IP
2432 * reassembly stage. But I'm not sure the correctness of the
2433 * partial hardware checksum assistance due to lack of data sheet.
2434 * In addition, the Rx feature of controller that requires copying
2435 * for every frames effectively nullifies one of most nice offload
2436 * capability of controller.
2437 */
/*
 * Set mbuf checksum-offload flags from the Rx return status word.
 *
 * m: the received frame; csum_flags/csum_data are updated in place.
 * status: hardware Rx status bits (ALE_RD_*) for this frame.
 *
 * On RXCSUM-bug controllers the hardware TCP/UDP checksum result is
 * only trusted for unfragmented datagrams, which requires peeking at
 * the IP header inside the frame.
 */
static void
ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status)
{
	struct ifnet *ifp;
	struct ip *ip;
	char *p;

	ifp = sc->ale_ifp;
	m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
	if ((status & ALE_RD_IPCSUM_NOK) == 0)
		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

	if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) {
		if (((status & ALE_RD_IPV4_FRAG) == 0) &&
		    ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) &&
		    ((status & ALE_RD_TCP_UDPCSUM_NOK) == 0)) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	} else {
		if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0 &&
		    (status & ALE_RD_TCP_UDPCSUM_NOK) == 0) {
			/* Locate the IP header past L2 encapsulation. */
			p = mtod(m, char *);
			p += ETHER_HDR_LEN;
			if ((status & ALE_RD_802_3) != 0)
				p += LLC_SNAPFRAMELEN;
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0 &&
			    (status & ALE_RD_VLAN) != 0)
				p += ETHER_VLAN_ENCAP_LEN;
			ip = (struct ip *)p;
			/* Fragmented datagrams: hardware result untrusted. */
			if (ip->ip_off != 0 && (status & ALE_RD_IPV4_DF) == 0)
				return;
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	/*
	 * Don't mark bad checksum for TCP/UDP frames
	 * as fragmented frames may always have set
	 * bad checksummed bit of frame status.
	 */
}
2482
2483 /* Process received frames. */
/*
 * Process up to "count" received frames from the current Rx page.
 *
 * Each frame is copied out of the DMA page with m_devget() and passed
 * up the stack (with the driver lock dropped around if_input).
 * Returns 0 normally, EIO on an Rx sequence-number mismatch (caller
 * must reinitialize the controller).
 *
 * NOTE(review): "count" is the caller's process limit and is never
 * decremented inside the loop, so the final "count > 0 ? 0 : EAGAIN"
 * can return EAGAIN only when called with count <= 0 -- verify this
 * matches the intended EAGAIN/morework contract with ale_int_task().
 */
static int
ale_rxeof(struct ale_softc *sc, int count)
{
	struct ale_rx_page *rx_page;
	struct rx_rs *rs;
	struct ifnet *ifp;
	struct mbuf *m;
	uint32_t length, prod, seqno, status, vtags;
	int prog;

	ifp = sc->ale_ifp;
	rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
	bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Don't directly access producer index as hardware may
	 * update it while Rx handler is in progress. It would
	 * be even better if there is a way to let hardware
	 * know how far driver processed its received frames.
	 * Alternatively, hardware could provide a way to disable
	 * CMB updates until driver acknowledges the end of CMB
	 * access.
	 */
	prod = *rx_page->cmb_addr;
	for (prog = 0; prog < count; prog++) {
		if (rx_page->cons >= prod)
			break;
		/* The return status header precedes the frame data. */
		rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons);
		seqno = ALE_RX_SEQNO(le32toh(rs->seqno));
		if (sc->ale_cdata.ale_rx_seqno != seqno) {
			/*
			 * Normally I believe this should not happen unless
			 * severe driver bug or corrupted memory. However
			 * it seems to happen under certain conditions which
			 * is triggered by abrupt Rx events such as initiation
			 * of bulk transfer of remote host. It's not easy to
			 * reproduce this and I doubt it could be related
			 * with FIFO overflow of hardware or activity of Tx
			 * CMB updates. I also remember similar behaviour
			 * seen on RealTek 8139 which uses resembling Rx
			 * scheme.
			 */
			if (bootverbose)
				device_printf(sc->ale_dev,
				    "garbled seq: %u, expected: %u -- "
				    "resetting!\n", seqno,
				    sc->ale_cdata.ale_rx_seqno);
			return (EIO);
		}
		/* Frame received. */
		sc->ale_cdata.ale_rx_seqno++;
		length = ALE_RX_BYTES(le32toh(rs->length));
		status = le32toh(rs->flags);
		if ((status & ALE_RD_ERROR) != 0) {
			/*
			 * We want to pass the following frames to upper
			 * layer regardless of error status of Rx return
			 * status.
			 *
			 * o IP/TCP/UDP checksum is bad.
			 * o frame length and protocol specific length
			 *   does not match.
			 */
			if ((status & (ALE_RD_CRC | ALE_RD_CODE |
			    ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW |
			    ALE_RD_TRUNC)) != 0) {
				ale_rx_update_page(sc, &rx_page, length, &prod);
				continue;
			}
		}
		/*
		 * m_devget(9) is major bottle-neck of ale(4)(It comes
		 * from hardware limitation). For jumbo frames we could
		 * get a slightly better performance if driver use
		 * m_getjcl(9) with proper buffer size argument. However
		 * that would make code more complicated and I don't
		 * think users would expect good Rx performance numbers
		 * on these low-end consumer ethernet controller.
		 */
		m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN,
		    ETHER_ALIGN, ifp, NULL);
		if (m == NULL) {
			ifp->if_iqdrops++;
			ale_rx_update_page(sc, &rx_page, length, &prod);
			continue;
		}
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
		    (status & ALE_RD_IPV4) != 0)
			ale_rxcsum(sc, m, status);
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (status & ALE_RD_VLAN) != 0) {
			vtags = ALE_RX_VLAN(le32toh(rs->vtags));
			m->m_pkthdr.ether_vtag = ALE_RX_VLAN_TAG(vtags);
			m->m_flags |= M_VLANTAG;
		}

		/* Pass it to upper layer. */
		ALE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		ALE_LOCK(sc);

		ale_rx_update_page(sc, &rx_page, length, &prod);
	}

	return (count > 0 ? 0 : EAGAIN);
}
2592
2593 static void
2594 ale_tick(void *arg)
2595 {
2596 struct ale_softc *sc;
2597 struct mii_data *mii;
2598
2599 sc = (struct ale_softc *)arg;
2600
2601 ALE_LOCK_ASSERT(sc);
2602
2603 mii = device_get_softc(sc->ale_miibus);
2604 mii_tick(mii);
2605 ale_stats_update(sc);
2606 /*
2607 * Reclaim Tx buffers that have been transferred. It's not
2608 * needed here but it would release allocated mbuf chains
2609 * faster and limit the maximum delay to a hz.
2610 */
2611 ale_txeof(sc);
2612 ale_watchdog(sc);
2613 callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc);
2614 }
2615
2616 static void
2617 ale_reset(struct ale_softc *sc)
2618 {
2619 uint32_t reg;
2620 int i;
2621
2622 /* Initialize PCIe module. From Linux. */
2623 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2624
2625 CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET);
2626 for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
2627 DELAY(10);
2628 if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0)
2629 break;
2630 }
2631 if (i == 0)
2632 device_printf(sc->ale_dev, "master reset timeout!\n");
2633
2634 for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
2635 if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0)
2636 break;
2637 DELAY(10);
2638 }
2639
2640 if (i == 0)
2641 device_printf(sc->ale_dev, "reset timeout(0x%08x)!\n", reg);
2642 }
2643
/*
 * Ifnet if_init entry point: take the driver lock and perform the
 * full (re)initialization in ale_init_locked().
 */
static void
ale_init(void *xsc)
{
	struct ale_softc *sc;

	sc = (struct ale_softc *)xsc;
	ALE_LOCK(sc);
	ale_init_locked(sc);
	ALE_UNLOCK(sc);
}
2654
/*
 * Bring the controller to a fully operational state: stop and reset
 * the chip, rebuild the Tx ring and Rx pages, program DMA addresses,
 * thresholds, interrupt moderation, MAC parameters and the Rx filter,
 * then start the MII state machine and mark the interface running.
 * Called with the driver lock held; a no-op if already running.
 */
static void
ale_init_locked(struct ale_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	ALE_LOCK_ASSERT(sc);

	ifp = sc->ale_ifp;
	mii = device_get_softc(sc->ale_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	ale_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	ale_reset(sc);
	/* Initialize Tx descriptors, DMA memory blocks. */
	ale_init_rx_pages(sc);
	ale_init_tx_ring(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Clear WOL status and disable all WOL feature as WOL
	 * would interfere Rx operation under normal environments.
	 */
	CSR_READ_4(sc, ALE_WOL_CFG);
	CSR_WRITE_4(sc, ALE_WOL_CFG, 0);
	/*
	 * Set Tx descriptor/RXF0/CMB base addresses. They share
	 * the same high address part of DMAable region.
	 */
	paddr = sc->ale_cdata.ale_tx_ring_paddr;
	CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr));
	CSR_WRITE_4(sc, ALE_TPD_CNT,
	    (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK);
	/* Set Rx page base address, note we use single queue. */
	paddr = sc->ale_cdata.ale_rx_page[0].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr));
	/* Set Tx/Rx CMB addresses. */
	paddr = sc->ale_cdata.ale_tx_cmb_paddr;
	CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr));
	/* Mark RXF0 is valid. */
	CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID);
	CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID);
	/*
	 * No need to initialize RFX1/RXF2/RXF3. We don't use
	 * multi-queue yet.
	 */

	/* Set Rx page size, excluding guard frame size. */
	CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ);
	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Set Rx/Tx interrupt trigger threshold. */
	CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) |
	    (4 << INT_TRIG_TX_THRESH_SHIFT));
	/*
	 * XXX
	 * Set interrupt trigger timer, its purpose and relation
	 * with interrupt moderation mechanism is not clear yet.
	 */
	CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER,
	    ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) |
	    (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT)));

	/* Configure interrupt moderation timer. */
	reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT;
	reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALE_IM_TIMER, reg);
	/* A moderation value of 0 disables that timer entirely. */
	reg = CSR_READ_4(sc, ALE_MASTER_CFG);
	reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
	if (ALE_USECS(sc->ale_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if (ALE_USECS(sc->ale_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALE_MASTER_CFG, reg);
	CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000));

	/* Set Maximum frame size of controller. */
	if (ifp->if_mtu < ETHERMTU)
		sc->ale_max_frame_size = ETHERMTU;
	else
		sc->ale_max_frame_size = ifp->if_mtu;
	sc->ale_max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN;
	CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size);
	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALE_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure Tx jumbo frame parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		if (ifp->if_mtu < ETHERMTU)
			reg = sc->ale_max_frame_size;
		else if (ifp->if_mtu < 6 * 1024)
			reg = (sc->ale_max_frame_size * 2) / 3;
		else
			reg = sc->ale_max_frame_size / 2;
		CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH,
		    roundup(reg, TX_JUMBO_THRESH_UNIT) >>
		    TX_JUMBO_THRESH_UNIT_SHIFT);
	}
	/* Configure TxQ. */
	reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT))
	    << TXQ_CFG_TX_FIFO_BURST_SHIFT;
	reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB);

	/* Configure Rx jumbo frame & flow control parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT);
		CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH,
		    (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) <<
		    RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) |
		    ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) &
		    RX_JUMBO_LKAH_MASK));
		/* Pause thresholds: 70%/30% of the Rx FIFO. */
		reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 7) / 10;
		rxf_lo = (reg * 3)/ 10;
		CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}

	/* Disable RSS. */
	CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0);
	CSR_WRITE_4(sc, ALE_RSS_CPU, 0);

	/* Configure RxQ. */
	CSR_WRITE_4(sc, ALE_RXQ_CFG,
	    RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure DMA parameters. */
	reg = 0;
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0)
		reg |= DMA_CFG_TXCMB_ENB;
	CSR_WRITE_4(sc, ALE_DMA_CFG,
	    DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 |
	    sc->ale_dma_rd_burst | reg |
	    sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB |
	    ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK) |
	    ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK));

	/*
	 * Hardware can be configured to issue SMB interrupt based
	 * on programmed interval. Since there is a callout that is
	 * invoked for every hz in driver we use that instead of
	 * relying on periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0));
	/* Clear MAC statistics. */
	ale_stats_clear(sc);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Actual reconfiguration of MAC for resolved speed/duplex
	 *  is followed after detection of link establishment.
	 *  AR81xx always does checksum computation regardless of
	 *  MAC_CFG_RXCSUM_ENB bit. In fact, setting the bit will
	 *  cause Rx handling issue for fragmented IP datagrams due
	 *  to silicon bug.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);

	/* Set up the receive filter. */
	ale_rxfilter(sc);
	ale_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear it. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0);

	sc->ale_flags &= ~ALE_FLAG_LINK;
	/* Switch to the current media. */
	mii_mediachg(mii);

	callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
2885
/*
 * Stop the controller: mark the interface down, disarm timers, take a
 * final statistics snapshot, disable interrupts, queue processing and
 * DMA, stop the MACs, and free any mbufs still held by the Tx ring.
 * Called with the driver lock held.
 */
static void
ale_stop(struct ale_softc *sc)
{
	struct ifnet *ifp;
	struct ale_txdesc *txd;
	uint32_t reg;
	int i;

	ALE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->ale_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->ale_flags &= ~ALE_FLAG_LINK;
	callout_stop(&sc->ale_tick_ch);
	sc->ale_watchdog_timer = 0;
	/* Harvest MIB counters one last time before the chip is quiesced. */
	ale_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
	/* Disable queue processing and DMA. */
	reg = CSR_READ_4(sc, ALE_TXQ_CFG);
	reg &= ~TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_RXQ_CFG);
	reg &= ~RXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_RXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_DMA_CFG);
	reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB);
	CSR_WRITE_4(sc, ALE_DMA_CFG, reg);
	/* Let in-flight DMA drain before stopping the MACs. */
	DELAY(1000);
	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);
	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Free TX mbufs still in the queues.
	 */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->ale_cdata.ale_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->ale_cdata.ale_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
2938
2939 static void
2940 ale_stop_mac(struct ale_softc *sc)
2941 {
2942 uint32_t reg;
2943 int i;
2944
2945 ALE_LOCK_ASSERT(sc);
2946
2947 reg = CSR_READ_4(sc, ALE_MAC_CFG);
2948 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
2949 reg &= ~MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
2950 CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
2951 }
2952
2953 for (i = ALE_TIMEOUT; i > 0; i--) {
2954 reg = CSR_READ_4(sc, ALE_IDLE_STATUS);
2955 if (reg == 0)
2956 break;
2957 DELAY(10);
2958 }
2959 if (i == 0)
2960 device_printf(sc->ale_dev,
2961 "could not disable Tx/Rx MAC(0x%08x)!\n", reg);
2962 }
2963
2964 static void
2965 ale_init_tx_ring(struct ale_softc *sc)
2966 {
2967 struct ale_txdesc *txd;
2968 int i;
2969
2970 ALE_LOCK_ASSERT(sc);
2971
2972 sc->ale_cdata.ale_tx_prod = 0;
2973 sc->ale_cdata.ale_tx_cons = 0;
2974 sc->ale_cdata.ale_tx_cnt = 0;
2975
2976 bzero(sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ);
2977 bzero(sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ);
2978 for (i = 0; i < ALE_TX_RING_CNT; i++) {
2979 txd = &sc->ale_cdata.ale_txdesc[i];
2980 txd->tx_m = NULL;
2981 }
2982 *sc->ale_cdata.ale_tx_cmb = 0;
2983 bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag,
2984 sc->ale_cdata.ale_tx_cmb_map,
2985 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2986 bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
2987 sc->ale_cdata.ale_tx_ring_map,
2988 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2989 }
2990
2991 static void
2992 ale_init_rx_pages(struct ale_softc *sc)
2993 {
2994 struct ale_rx_page *rx_page;
2995 int i;
2996
2997 ALE_LOCK_ASSERT(sc);
2998
2999 sc->ale_morework = 0;
3000 sc->ale_cdata.ale_rx_seqno = 0;
3001 sc->ale_cdata.ale_rx_curp = 0;
3002
3003 for (i = 0; i < ALE_RX_PAGES; i++) {
3004 rx_page = &sc->ale_cdata.ale_rx_page[i];
3005 bzero(rx_page->page_addr, sc->ale_pagesize);
3006 bzero(rx_page->cmb_addr, ALE_RX_CMB_SZ);
3007 rx_page->cons = 0;
3008 *rx_page->cmb_addr = 0;
3009 bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
3010 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3011 bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
3012 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3013 }
3014 }
3015
3016 static void
3017 ale_rxvlan(struct ale_softc *sc)
3018 {
3019 struct ifnet *ifp;
3020 uint32_t reg;
3021
3022 ALE_LOCK_ASSERT(sc);
3023
3024 ifp = sc->ale_ifp;
3025 reg = CSR_READ_4(sc, ALE_MAC_CFG);
3026 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3027 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3028 reg |= MAC_CFG_VLAN_TAG_STRIP;
3029 CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
3030 }
3031
3032 static void
3033 ale_rxfilter(struct ale_softc *sc)
3034 {
3035 struct ifnet *ifp;
3036 struct ifmultiaddr *ifma;
3037 uint32_t crc;
3038 uint32_t mchash[2];
3039 uint32_t rxcfg;
3040
3041 ALE_LOCK_ASSERT(sc);
3042
3043 ifp = sc->ale_ifp;
3044
3045 rxcfg = CSR_READ_4(sc, ALE_MAC_CFG);
3046 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3047 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3048 rxcfg |= MAC_CFG_BCAST;
3049 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3050 if ((ifp->if_flags & IFF_PROMISC) != 0)
3051 rxcfg |= MAC_CFG_PROMISC;
3052 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3053 rxcfg |= MAC_CFG_ALLMULTI;
3054 CSR_WRITE_4(sc, ALE_MAR0, 0xFFFFFFFF);
3055 CSR_WRITE_4(sc, ALE_MAR1, 0xFFFFFFFF);
3056 CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
3057 return;
3058 }
3059
3060 /* Program new filter. */
3061 bzero(mchash, sizeof(mchash));
3062
3063 if_maddr_rlock(ifp);
3064 TAILQ_FOREACH(ifma, &sc->ale_ifp->if_multiaddrs, ifma_link) {
3065 if (ifma->ifma_addr->sa_family != AF_LINK)
3066 continue;
3067 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3068 ifma->ifma_addr), ETHER_ADDR_LEN);
3069 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
3070 }
3071 if_maddr_runlock(ifp);
3072
3073 CSR_WRITE_4(sc, ALE_MAR0, mchash[0]);
3074 CSR_WRITE_4(sc, ALE_MAR1, mchash[1]);
3075 CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
3076 }
3077
3078 static int
3079 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3080 {
3081 int error, value;
3082
3083 if (arg1 == NULL)
3084 return (EINVAL);
3085 value = *(int *)arg1;
3086 error = sysctl_handle_int(oidp, &value, 0, req);
3087 if (error || req->newptr == NULL)
3088 return (error);
3089 if (value < low || value > high)
3090 return (EINVAL);
3091 *(int *)arg1 = value;
3092
3093 return (0);
3094 }
3095
/* Sysctl handler bounding the Rx processing limit to the driver's range. */
static int
sysctl_hw_ale_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALE_PROC_MIN, ALE_PROC_MAX));
}
3102
/* Sysctl handler bounding the interrupt moderation timer to its range. */
static int
sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALE_IM_TIMER_MIN, ALE_IM_TIMER_MAX));
}
Cache object: ce6bdf9179f637d392edbefb322be3a1
|