FreeBSD/Linux Kernel Cross Reference
sys/dev/netif/ae/if_ae.c
1 /*-
2 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
26 *
27 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
28 *
29 * $FreeBSD: src/sys/dev/ae/if_ae.c,v 1.1.2.3.2.1 2009/04/15 03:14:26 kensmith Exp $
30 */
31
32 #include <sys/param.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/bus.h>
36 #include <sys/interrupt.h>
37 #include <sys/malloc.h>
38 #include <sys/proc.h>
39 #include <sys/rman.h>
40 #include <sys/serialize.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
44
45 #include <net/ethernet.h>
46 #include <net/if.h>
47 #include <net/bpf.h>
48 #include <net/if_arp.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51 #include <net/ifq_var.h>
52 #include <net/vlan/if_vlan_var.h>
53 #include <net/vlan/if_vlan_ether.h>
54
55 #include <bus/pci/pcireg.h>
56 #include <bus/pci/pcivar.h>
57 #include "pcidevs.h"
58
59 #include <dev/netif/mii_layer/miivar.h>
60
61 #include <dev/netif/ae/if_aereg.h>
62 #include <dev/netif/ae/if_aevar.h>
63
64 /* "device miibus" required. See GENERIC if you get errors here. */
65 #include "miibus_if.h"
66
67 /*
68 * Devices supported by this driver.
69 */
70 static const struct ae_dev {
71 uint16_t ae_vendorid;
72 uint16_t ae_deviceid;
73 const char *ae_name;
74 } ae_devs[] = {
75 { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
76 "Attansic Technology Corp, L2 Fast Ethernet" },
77 /* Required last entry */
78 { 0, 0, NULL }
79 };
80
81
82 static int ae_probe(device_t);
83 static int ae_attach(device_t);
84 static int ae_detach(device_t);
85 static int ae_shutdown(device_t);
86 static int ae_suspend(device_t);
87 static int ae_resume(device_t);
88 static int ae_miibus_readreg(device_t, int, int);
89 static int ae_miibus_writereg(device_t, int, int, int);
90 static void ae_miibus_statchg(device_t);
91
92 static int ae_mediachange(struct ifnet *);
93 static void ae_mediastatus(struct ifnet *, struct ifmediareq *);
94 static void ae_init(void *);
95 static void ae_start(struct ifnet *, struct ifaltq_subque *);
96 static int ae_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
97 static void ae_watchdog(struct ifnet *);
98 static void ae_stop(struct ae_softc *);
99 static void ae_tick(void *);
100
101 static void ae_intr(void *);
102 static void ae_tx_intr(struct ae_softc *);
103 static void ae_rx_intr(struct ae_softc *);
104 static int ae_rxeof(struct ae_softc *, struct ae_rxd *);
105
106 static int ae_encap(struct ae_softc *, struct mbuf **);
107 static void ae_sysctl_node(struct ae_softc *);
108 static void ae_phy_reset(struct ae_softc *);
109 static int ae_reset(struct ae_softc *);
110 static void ae_pcie_init(struct ae_softc *);
111 static void ae_get_eaddr(struct ae_softc *);
112 static void ae_dma_free(struct ae_softc *);
113 static int ae_dma_alloc(struct ae_softc *);
114 static void ae_mac_config(struct ae_softc *);
115 static void ae_stop_rxmac(struct ae_softc *);
116 static void ae_stop_txmac(struct ae_softc *);
117 static void ae_rxfilter(struct ae_softc *);
118 static void ae_rxvlan(struct ae_softc *);
119 static void ae_update_stats_rx(uint16_t, struct ae_stats *);
120 static void ae_update_stats_tx(uint16_t, struct ae_stats *);
121 static void ae_powersave_disable(struct ae_softc *);
122 static void ae_powersave_enable(struct ae_softc *);
123
124 static device_method_t ae_methods[] = {
125 /* Device interface. */
126 DEVMETHOD(device_probe, ae_probe),
127 DEVMETHOD(device_attach, ae_attach),
128 DEVMETHOD(device_detach, ae_detach),
129 DEVMETHOD(device_shutdown, ae_shutdown),
130 DEVMETHOD(device_suspend, ae_suspend),
131 DEVMETHOD(device_resume, ae_resume),
132
133 /* Bus interface. */
134 DEVMETHOD(bus_print_child, bus_generic_print_child),
135 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
136
137 /* MII interface. */
138 DEVMETHOD(miibus_readreg, ae_miibus_readreg),
139 DEVMETHOD(miibus_writereg, ae_miibus_writereg),
140 DEVMETHOD(miibus_statchg, ae_miibus_statchg),
141 { NULL, NULL }
142 };
143
144 static driver_t ae_driver = {
145 "ae",
146 ae_methods,
147 sizeof(struct ae_softc)
148 };
149
150 static devclass_t ae_devclass;
151 DECLARE_DUMMY_MODULE(if_ae);
152 MODULE_DEPEND(if_ae, miibus, 1, 1, 1);
153 DRIVER_MODULE(if_ae, pci, ae_driver, ae_devclass, NULL, NULL);
154 DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, NULL, NULL);
155
156 /* Register access macros. */
157 #define AE_WRITE_4(_sc, reg, val) \
158 bus_space_write_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
159 #define AE_WRITE_2(_sc, reg, val) \
160 bus_space_write_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
161 #define AE_WRITE_1(_sc, reg, val) \
162 bus_space_write_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
163 #define AE_READ_4(_sc, reg) \
164 bus_space_read_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
165 #define AE_READ_2(_sc, reg) \
166 bus_space_read_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
167 #define AE_READ_1(_sc, reg) \
168 bus_space_read_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
169
170 #define AE_PHY_READ(sc, reg) \
171 ae_miibus_readreg(sc->ae_dev, 0, reg)
172 #define AE_PHY_WRITE(sc, reg, val) \
173 ae_miibus_writereg(sc->ae_dev, 0, reg, val)
174 #define AE_CHECK_EADDR_VALID(eaddr) \
175 ((eaddr[0] == 0 && eaddr[1] == 0) || \
176 (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
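/*
 * The hardware stores the 802.1Q TCI with its fields rotated relative to
 * the host layout (PCP:3 | CFI:1 | VID:12): the VID occupies bits 15:4,
 * the CFI bit 3 and the priority bits 2:0.  AE_RXD_VLAN() converts the
 * hardware layout to a host TCI; AE_TXD_VLAN() is its inverse.
 */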
177 #define AE_RXD_VLAN(vtag) \
178 (((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
179 #define AE_TXD_VLAN(vtag) \
180 (((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
181
182 /*
183 * ae statistics.
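 * Each entry pairs a sysctl node name and description with the offsetof()
 * of the matching counter in struct ae_stats, so ae_sysctl_node() can
 * register both tables in a loop.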
184 */
185 #define STATS_ENTRY(node, desc, field) \
186 { node, desc, offsetof(struct ae_stats, field) }
187 struct {
188 const char *node;
189 const char *desc;
190 intptr_t offset;
191 } ae_stats_tx[] = {
192 STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
193 STATS_ENTRY("mcast", "multicast frames", tx_mcast),
194 STATS_ENTRY("pause", "PAUSE frames", tx_pause),
195 STATS_ENTRY("control", "control frames", tx_ctrl),
196 STATS_ENTRY("defers", "deferrals occuried", tx_defer),
197 STATS_ENTRY("exc_defers", "excessive deferrals occuried", tx_excdefer),
198 STATS_ENTRY("singlecols", "single collisions occuried", tx_singlecol),
199 STATS_ENTRY("multicols", "multiple collisions occuried", tx_multicol),
200 STATS_ENTRY("latecols", "late collisions occuried", tx_latecol),
201 STATS_ENTRY("aborts", "transmit aborts due collisions", tx_abortcol),
202 STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
203 }, ae_stats_rx[] = {
204 STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
205 STATS_ENTRY("mcast", "multicast frames", rx_mcast),
206 STATS_ENTRY("pause", "PAUSE frames", rx_pause),
207 STATS_ENTRY("control", "control frames", rx_ctrl),
208 STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
209 STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
210 STATS_ENTRY("runt", "runt frames", rx_runt),
211 STATS_ENTRY("frag", "fragmented frames", rx_frag),
212 STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
213 STATS_ENTRY("truncated", "frames truncated due to Rx FIFO inderrun",
214 rx_trunc)
215 };
216 #define AE_STATS_RX_LEN NELEM(ae_stats_rx)
217 #define AE_STATS_TX_LEN NELEM(ae_stats_tx)
218
219 static void
220 ae_stop(struct ae_softc *sc)
221 {
222 struct ifnet *ifp = &sc->arpcom.ac_if;
223 int i;
224
225 ASSERT_SERIALIZED(ifp->if_serializer);
226
227 ifp->if_flags &= ~IFF_RUNNING;
228 ifq_clr_oactive(&ifp->if_snd);
229 ifp->if_timer = 0;
230
231 sc->ae_flags &= ~AE_FLAG_LINK;
232 callout_stop(&sc->ae_tick_ch);
233
234 /*
235 * Clear and disable interrupts.
236 */
237 AE_WRITE_4(sc, AE_IMR_REG, 0);
238 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
239
240 /*
241 * Stop Rx/Tx MACs.
242 */
243 ae_stop_txmac(sc);
244 ae_stop_rxmac(sc);
245
246 /*
247 * Stop DMA engines.
248 */
249 AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
250 AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);
251
252 /*
253 * Wait for everything to enter idle state.
254 */
255 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
256 if (AE_READ_4(sc, AE_IDLE_REG) == 0)
257 break;
258 DELAY(100);
259 }
260 if (i == AE_IDLE_TIMEOUT)
261 if_printf(ifp, "could not enter idle state in stop.\n");
262 }
263
264 static void
265 ae_stop_rxmac(struct ae_softc *sc)
266 {
267 uint32_t val;
268 int i;
269
270 /*
271 * Stop Rx MAC engine.
272 */
273 val = AE_READ_4(sc, AE_MAC_REG);
274 if ((val & AE_MAC_RX_EN) != 0) {
275 val &= ~AE_MAC_RX_EN;
276 AE_WRITE_4(sc, AE_MAC_REG, val);
277 }
278
279 /*
280 * Stop Rx DMA engine.
281 */
282 if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
283 AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
284
285 /*
286 * Wait for IDLE state.
287 */
288 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
289 val = AE_READ_4(sc, AE_IDLE_REG);
290 if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
291 break;
292 DELAY(100);
293 }
294 if (i == AE_IDLE_TIMEOUT) {
295 if_printf(&sc->arpcom.ac_if,
296 "timed out while stopping Rx MAC.\n");
297 }
298 }
299
300 static void
301 ae_stop_txmac(struct ae_softc *sc)
302 {
303 uint32_t val;
304 int i;
305
306 /*
307 * Stop Tx MAC engine.
308 */
309 val = AE_READ_4(sc, AE_MAC_REG);
310 if ((val & AE_MAC_TX_EN) != 0) {
311 val &= ~AE_MAC_TX_EN;
312 AE_WRITE_4(sc, AE_MAC_REG, val);
313 }
314
315 /*
316 * Stop Tx DMA engine.
317 */
318 if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
319 AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
320
321 /*
322 * Wait for IDLE state.
323 */
324 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
325 val = AE_READ_4(sc, AE_IDLE_REG);
326 if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
327 break;
328 DELAY(100);
329 }
330 if (i == AE_IDLE_TIMEOUT) {
331 if_printf(&sc->arpcom.ac_if,
332 "timed out while stopping Tx MAC.\n");
333 }
334 }
335
336 /*
337 * Callback from MII layer when media changes.
338 */
339 static void
340 ae_miibus_statchg(device_t dev)
341 {
342 struct ae_softc *sc = device_get_softc(dev);
343 struct ifnet *ifp = &sc->arpcom.ac_if;
344 struct mii_data *mii;
345 uint32_t val;
346
347 ASSERT_SERIALIZED(ifp->if_serializer);
348
349 if ((ifp->if_flags & IFF_RUNNING) == 0)
350 return;
351
352 mii = device_get_softc(sc->ae_miibus);
353 sc->ae_flags &= ~AE_FLAG_LINK;
354 if ((mii->mii_media_status & IFM_AVALID) != 0) {
355 switch (IFM_SUBTYPE(mii->mii_media_active)) {
356 case IFM_10_T:
357 case IFM_100_TX:
358 sc->ae_flags |= AE_FLAG_LINK;
359 break;
360 default:
361 break;
362 }
363 }
364
365 /* Stop Rx/Tx MACs. */
366 ae_stop_rxmac(sc);
367 ae_stop_txmac(sc);
368
369 /* Program MACs with resolved speed/duplex/flow-control. */
370 if ((sc->ae_flags & AE_FLAG_LINK) != 0) {
371 ae_mac_config(sc);
372
373 /*
374 * Restart DMA engines.
375 */
376 AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
377 AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
378
379 /*
380 * Enable Rx and Tx MACs.
381 */
382 val = AE_READ_4(sc, AE_MAC_REG);
383 val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
384 AE_WRITE_4(sc, AE_MAC_REG, val);
385 }
386 }
387
388 static void
389 ae_sysctl_node(struct ae_softc *sc)
390 {
391 struct sysctl_ctx_list *ctx;
392 struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
393 struct ae_stats *ae_stats;
394 unsigned int i;
395
396 ae_stats = &sc->stats;
397 sysctl_ctx_init(&sc->ae_sysctl_ctx);
398 sc->ae_sysctl_tree = SYSCTL_ADD_NODE(&sc->ae_sysctl_ctx,
399 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
400 device_get_nameunit(sc->ae_dev),
401 CTLFLAG_RD, 0, "");
402 if (sc->ae_sysctl_tree == NULL) {
403 device_printf(sc->ae_dev, "can't add sysctl node\n");
404 return;
405 }
406 ctx = &sc->ae_sysctl_ctx;
407 root = sc->ae_sysctl_tree;
408
409 stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
410 CTLFLAG_RD, NULL, "ae statistics");
411 if (stats == NULL) {
412 device_printf(sc->ae_dev, "can't add stats sysctl node\n");
413 return;
414 }
415
416 /*
417 * Receiver statistics.
418 */
419 stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
420 CTLFLAG_RD, NULL, "Rx MAC statistics");
421 if (stats_rx != NULL) {
422 for (i = 0; i < AE_STATS_RX_LEN; i++) {
423 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx),
424 OID_AUTO, ae_stats_rx[i].node, CTLFLAG_RD,
425 (char *)ae_stats + ae_stats_rx[i].offset, 0,
426 ae_stats_rx[i].desc);
427 }
428 }
429
430 /*
431 * Transmitter statistics.
432 */
433 stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
434 CTLFLAG_RD, NULL, "Tx MAC statistics");
435 if (stats_tx != NULL) {
436 for (i = 0; i < AE_STATS_TX_LEN; i++) {
437 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx),
438 OID_AUTO, ae_stats_tx[i].node, CTLFLAG_RD,
439 (char *)ae_stats + ae_stats_tx[i].offset, 0,
440 ae_stats_tx[i].desc);
441 }
442 }
443 }
444
445 static int
446 ae_miibus_readreg(device_t dev, int phy, int reg)
447 {
448 struct ae_softc *sc = device_get_softc(dev);
449 uint32_t val;
450 int i;
451
452 /*
453 * Locking is done in upper layers.
454 */
455 if (phy != sc->ae_phyaddr)
456 return (0);
457 val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
458 AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
459 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
460 AE_WRITE_4(sc, AE_MDIO_REG, val);
461
462 /*
463 * Wait for operation to complete.
464 */
465 for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
466 DELAY(2);
467 val = AE_READ_4(sc, AE_MDIO_REG);
468 if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
469 break;
470 }
471 if (i == AE_MDIO_TIMEOUT) {
472 device_printf(sc->ae_dev, "phy read timeout: %d.\n", reg);
473 return (0);
474 }
475 return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
476 }
477
478 static int
479 ae_miibus_writereg(device_t dev, int phy, int reg, int val)
480 {
481 struct ae_softc *sc = device_get_softc(dev);
482 uint32_t aereg;
483 int i;
484
485 /*
486 * Locking is done in upper layers.
487 */
488 if (phy != sc->ae_phyaddr)
489 return (0);
490 aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
491 AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
492 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
493 ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
494 AE_WRITE_4(sc, AE_MDIO_REG, aereg);
495
496 /*
497 * Wait for operation to complete.
498 */
499 for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
500 DELAY(2);
501 aereg = AE_READ_4(sc, AE_MDIO_REG);
502 if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
503 break;
504 }
505 if (i == AE_MDIO_TIMEOUT)
506 device_printf(sc->ae_dev, "phy write timeout: %d.\n", reg);
507 return (0);
508 }
509
510 static int
511 ae_probe(device_t dev)
512 {
513 uint16_t vendor, devid;
514 const struct ae_dev *sp;
515
516 vendor = pci_get_vendor(dev);
517 devid = pci_get_device(dev);
518 for (sp = ae_devs; sp->ae_name != NULL; sp++) {
519 if (vendor == sp->ae_vendorid &&
520 devid == sp->ae_deviceid) {
521 device_set_desc(dev, sp->ae_name);
522 return (0);
523 }
524 }
525 return (ENXIO);
526 }
527
528 static int
529 ae_dma_alloc(struct ae_softc *sc)
530 {
531 bus_addr_t busaddr;
532 int error;
533
534 /*
535 * Create parent DMA tag.
536 */
537 error = bus_dma_tag_create(NULL, 1, 0,
538 BUS_SPACE_MAXADDR_32BIT,
539 BUS_SPACE_MAXADDR,
540 NULL, NULL,
541 BUS_SPACE_MAXSIZE_32BIT,
542 0,
543 BUS_SPACE_MAXSIZE_32BIT,
544 0, &sc->dma_parent_tag);
545 if (error) {
546 device_printf(sc->ae_dev, "could not creare parent DMA tag.\n");
547 return (error);
548 }
549
550 /*
551 * Create DMA resources for TxD.
552 */
553 sc->txd_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
554 AE_TXD_BUFSIZE_DEFAULT, BUS_DMA_WAITOK | BUS_DMA_ZERO,
555 &sc->dma_txd_tag, &sc->dma_txd_map,
556 &sc->dma_txd_busaddr);
557 if (sc->txd_base == NULL) {
558 device_printf(sc->ae_dev, "could not create TxD DMA resources.\n");
559 return ENOMEM;
560 }
561
562 /*
563 * Create DMA resources for TxS.
564 */
565 sc->txs_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
566 AE_TXS_COUNT_DEFAULT * 4, BUS_DMA_WAITOK | BUS_DMA_ZERO,
567 &sc->dma_txs_tag, &sc->dma_txs_map,
568 &sc->dma_txs_busaddr);
569 if (sc->txs_base == NULL) {
570 device_printf(sc->ae_dev, "could not create TxS DMA resources.\n");
571 return ENOMEM;
572 }
573
574 /*
575 * Create DMA resources for RxD.
576 */
577 sc->rxd_base_dma = bus_dmamem_coherent_any(sc->dma_parent_tag, 128,
578 AE_RXD_COUNT_DEFAULT * 1536 + 120,
579 BUS_DMA_WAITOK | BUS_DMA_ZERO,
580 &sc->dma_rxd_tag, &sc->dma_rxd_map,
581 &busaddr);
582 if (sc->rxd_base_dma == NULL) {
583 device_printf(sc->ae_dev, "could not create RxD DMA resources.\n");
584 return ENOMEM;
585 }
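/*
 * The descriptor area starts 120 bytes into the allocation; this leading
 * pad is a magic offset the hardware appears to require (kept as-is from
 * the original driver).
 */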
586 sc->dma_rxd_busaddr = busaddr + 120;
587 sc->rxd_base = (struct ae_rxd *)(sc->rxd_base_dma + 120);
588
589 return (0);
590 }
591
592 static void
593 ae_mac_config(struct ae_softc *sc)
594 {
595 struct mii_data *mii;
596 uint32_t val;
597
598 mii = device_get_softc(sc->ae_miibus);
599 val = AE_READ_4(sc, AE_MAC_REG);
600 val &= ~AE_MAC_FULL_DUPLEX;
601 /* XXX disable AE_MAC_TX_FLOW_EN? */
602 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
603 val |= AE_MAC_FULL_DUPLEX;
604 AE_WRITE_4(sc, AE_MAC_REG, val);
605 }
606
607 static int
608 ae_rxeof(struct ae_softc *sc, struct ae_rxd *rxd)
609 {
610 struct ifnet *ifp = &sc->arpcom.ac_if;
611 struct mbuf *m;
612 unsigned int size;
613 uint16_t flags;
614
615 flags = le16toh(rxd->flags);
616 #ifdef AE_DEBUG
617 if_printf(ifp, "Rx interrupt occuried.\n");
618 #endif
619 size = le16toh(rxd->len) - ETHER_CRC_LEN;
620 if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN -
621 sizeof(struct ether_vlan_header))) {
622 if_printf(ifp, "Runt frame received.");
623 return (EIO);
624 }
625
626 m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
627 if (m == NULL)
628 return (ENOBUFS);
629
630 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
631 (flags & AE_RXD_HAS_VLAN)) {
632 m->m_pkthdr.ether_vlantag = AE_RXD_VLAN(le16toh(rxd->vlan));
633 m->m_flags |= M_VLANTAG;
634 }
635 ifp->if_input(ifp, m);
636
637 return (0);
638 }
639
640 static void
641 ae_rx_intr(struct ae_softc *sc)
642 {
643 struct ifnet *ifp = &sc->arpcom.ac_if;
644 struct ae_rxd *rxd;
645 uint16_t flags;
646 int error;
647
648 /*
649 * Synchronize DMA buffers.
650 */
651 bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
652 BUS_DMASYNC_POSTREAD);
653 for (;;) {
654 rxd = (struct ae_rxd *)(sc->rxd_base + sc->rxd_cur);
655
656 flags = le16toh(rxd->flags);
657 if ((flags & AE_RXD_UPDATE) == 0)
658 break;
659 rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
660
661 /* Update stats. */
662 ae_update_stats_rx(flags, &sc->stats);
663
664 /*
665 * Update position index.
666 */
667 sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;
668 if ((flags & AE_RXD_SUCCESS) == 0) {
669 IFNET_STAT_INC(ifp, ierrors, 1);
670 continue;
671 }
672
673 error = ae_rxeof(sc, rxd);
674 if (error)
675 IFNET_STAT_INC(ifp, ierrors, 1);
676 else
677 IFNET_STAT_INC(ifp, ipackets, 1);
678 }
679
680 /* Update Rx index. */
681 AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
682 }
683
684 static void
685 ae_tx_intr(struct ae_softc *sc)
686 {
687 struct ifnet *ifp = &sc->arpcom.ac_if;
688 struct ae_txd *txd;
689 struct ae_txs *txs;
690 uint16_t flags;
691
692 /*
693 * Synchronize DMA buffers.
694 */
695 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_POSTREAD);
696 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_POSTREAD);
697
698 for (;;) {
699 txs = sc->txs_base + sc->txs_ack;
700
701 flags = le16toh(txs->flags);
702 if ((flags & AE_TXS_UPDATE) == 0)
703 break;
704 txs->flags = htole16(flags & ~AE_TXS_UPDATE);
705
706 /* Update stats. */
707 ae_update_stats_tx(flags, &sc->stats);
708
709 /*
710 * Update TxS position.
711 */
712 sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
713 sc->ae_flags |= AE_FLAG_TXAVAIL;
714 txd = (struct ae_txd *)(sc->txd_base + sc->txd_ack);
715 if (txs->len != txd->len) {
716 device_printf(sc->ae_dev, "Size mismatch: "
717 "TxS:%d TxD:%d\n",
718 le16toh(txs->len), le16toh(txd->len));
719 }
720
721 /*
722 * Move txd ack and align on 4-byte boundary.
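 * Each entry occupies the 4-byte TxD header plus the payload length,
 * rounded up to a multiple of four, matching the layout ae_encap() writes.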
723 */
724 sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
725 AE_TXD_BUFSIZE_DEFAULT;
726 if ((flags & AE_TXS_SUCCESS) != 0)
727 IFNET_STAT_INC(ifp, opackets, 1);
728 else
729 IFNET_STAT_INC(ifp, oerrors, 1);
730 sc->tx_inproc--;
731 }
732
733 if (sc->tx_inproc < 0) {
734 /* XXX assert? */
735 if_printf(ifp, "Received stray Tx interrupt(s).\n");
736 sc->tx_inproc = 0;
737 }
738 if (sc->tx_inproc == 0)
739 ifp->if_timer = 0; /* Unarm watchdog. */
740 if (sc->ae_flags & AE_FLAG_TXAVAIL) {
741 ifq_clr_oactive(&ifp->if_snd);
742 if (!ifq_is_empty(&ifp->if_snd))
743 #ifdef foo
744 ae_intr(sc);
745 #else
746 if_devstart(ifp);
747 #endif
748 }
749
750 /*
751 * Synchronize DMA buffers.
752 */
753 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
754 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);
755 }
756
757 static void
758 ae_intr(void *xsc)
759 {
760 struct ae_softc *sc = xsc;
761 struct ifnet *ifp = &sc->arpcom.ac_if;
762 uint32_t val;
763
764 ASSERT_SERIALIZED(ifp->if_serializer);
765
766 val = AE_READ_4(sc, AE_ISR_REG);
767 if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
768 return;
769
770 #ifdef foo
771 AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
772 #endif
773
774 /* Read interrupt status. */
775 val = AE_READ_4(sc, AE_ISR_REG);
776
777 /* Clear interrupts and disable them. */
778 AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);
779
780 if (ifp->if_flags & IFF_RUNNING) {
781 if (val & (AE_ISR_DMAR_TIMEOUT |
782 AE_ISR_DMAW_TIMEOUT |
783 AE_ISR_PHY_LINKDOWN)) {
784 ae_init(sc);
785 }
786 if (val & AE_ISR_TX_EVENT)
787 ae_tx_intr(sc);
788 if (val & AE_ISR_RX_EVENT)
789 ae_rx_intr(sc);
790 }
791
792 /* Re-enable interrupts. */
793 AE_WRITE_4(sc, AE_ISR_REG, 0);
794 }
795
796 static void
797 ae_init(void *xsc)
798 {
799 struct ae_softc *sc = xsc;
800 struct ifnet *ifp = &sc->arpcom.ac_if;
801 struct mii_data *mii;
802 uint8_t eaddr[ETHER_ADDR_LEN];
803 uint32_t val;
804 bus_addr_t addr;
805
806 ASSERT_SERIALIZED(ifp->if_serializer);
807
808 mii = device_get_softc(sc->ae_miibus);
809 ae_stop(sc);
810 ae_reset(sc);
811 ae_pcie_init(sc);
812 ae_powersave_disable(sc);
813
814 /*
815 * Clear and disable interrupts.
816 */
817 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
818
819 /*
820 * Set the MAC address.
821 */
822 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
823 val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
824 AE_WRITE_4(sc, AE_EADDR0_REG, val);
825 val = eaddr[0] << 8 | eaddr[1];
826 AE_WRITE_4(sc, AE_EADDR1_REG, val);
827
828 /*
829 * Set ring buffers base addresses.
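 * Only one high-address register exists, so all rings must share the
 * same upper 32 address bits; the 32-bit-bounded parent DMA tag
 * guarantees this.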
830 */
831 addr = sc->dma_rxd_busaddr;
832 AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
833 AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
834 addr = sc->dma_txd_busaddr;
835 AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
836 addr = sc->dma_txs_busaddr;
837 AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));
838
839 /*
840 * Configure ring buffers sizes.
841 */
842 AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
843 AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
844 AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);
845
846 /*
847 * Configure interframe gap parameters.
848 */
849 val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
850 AE_IFG_TXIPG_MASK) |
851 ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
852 AE_IFG_RXIPG_MASK) |
853 ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
854 AE_IFG_IPGR1_MASK) |
855 ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
856 AE_IFG_IPGR2_MASK);
857 AE_WRITE_4(sc, AE_IFG_REG, val);
858
859 /*
860 * Configure half-duplex operation.
861 */
862 val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
863 AE_HDPX_LCOL_MASK) |
864 ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
865 AE_HDPX_RETRY_MASK) |
866 ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
867 AE_HDPX_ABEBT_MASK) |
868 ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
869 AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
870 AE_WRITE_4(sc, AE_HDPX_REG, val);
871
872 /*
873 * Configure interrupt moderate timer.
874 */
875 AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
876 val = AE_READ_4(sc, AE_MASTER_REG);
877 val |= AE_MASTER_IMT_EN;
878 AE_WRITE_4(sc, AE_MASTER_REG, val);
879
880 /*
881 * Configure interrupt clearing timer.
882 */
883 AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);
884
885 /*
886 * Configure MTU.
887 */
888 val = ifp->if_mtu + ETHER_HDR_LEN + sizeof(struct ether_vlan_header) +
889 ETHER_CRC_LEN;
890 AE_WRITE_2(sc, AE_MTU_REG, val);
891
892 /*
893 * Configure cut-through threshold.
894 */
895 AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);
896
897 /*
898 * Configure flow control.
899 */
900 AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
901 AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
902 (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
903 (AE_RXD_COUNT_DEFAULT / 12));
904
905 /*
906 * Init mailboxes.
907 */
908 sc->txd_cur = sc->rxd_cur = 0;
909 sc->txs_ack = sc->txd_ack = 0;
910 sc->rxd_cur = 0;
911 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
912 AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
913 sc->tx_inproc = 0;
914 sc->ae_flags |= AE_FLAG_TXAVAIL; /* Free Tx's available. */
915
916 /*
917 * Enable DMA.
918 */
919 AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
920 AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
921
922 /*
923 * Check if everything is OK.
924 */
925 val = AE_READ_4(sc, AE_ISR_REG);
926 if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
927 device_printf(sc->ae_dev, "Initialization failed.\n");
928 return;
929 }
930
931 /*
932 * Clear interrupt status.
933 */
934 AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
935 AE_WRITE_4(sc, AE_ISR_REG, 0x0);
936
937 /*
938 * Enable interrupts.
939 */
940 val = AE_READ_4(sc, AE_MASTER_REG);
941 AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
942 AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);
943
944 /*
945 * Disable WOL.
946 */
947 AE_WRITE_4(sc, AE_WOL_REG, 0);
948
949 /*
950 * Configure MAC.
951 */
952 val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
953 AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
954 AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
955 ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
956 ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
957 AE_MAC_PREAMBLE_MASK);
958 AE_WRITE_4(sc, AE_MAC_REG, val);
959
960 /*
961 * Configure Rx MAC.
962 */
963 ae_rxfilter(sc);
964 ae_rxvlan(sc);
965
966 /*
967 * Enable Tx/Rx.
968 */
969 val = AE_READ_4(sc, AE_MAC_REG);
970 AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);
971
972 sc->ae_flags &= ~AE_FLAG_LINK;
973 mii_mediachg(mii); /* Switch to the current media. */
974
975 callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
976 ifp->if_flags |= IFF_RUNNING;
977 ifq_clr_oactive(&ifp->if_snd);
978 }
979
980 static void
981 ae_watchdog(struct ifnet *ifp)
982 {
983 struct ae_softc *sc = ifp->if_softc;
984
985 ASSERT_SERIALIZED(ifp->if_serializer);
986
987 if ((sc->ae_flags & AE_FLAG_LINK) == 0)
988 if_printf(ifp, "watchdog timeout (missed link).\n");
989 else
990 if_printf(ifp, "watchdog timeout - resetting.\n");
991 IFNET_STAT_INC(ifp, oerrors, 1);
992
993 ae_init(sc);
994 if (!ifq_is_empty(&ifp->if_snd))
995 if_devstart(ifp);
996 }
997
998 static void
999 ae_tick(void *xsc)
1000 {
1001 struct ae_softc *sc = xsc;
1002 struct ifnet *ifp = &sc->arpcom.ac_if;
1003 struct mii_data *mii = device_get_softc(sc->ae_miibus);
1004
1005 lwkt_serialize_enter(ifp->if_serializer);
1006 mii_tick(mii);
1007 callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
1008 lwkt_serialize_exit(ifp->if_serializer);
1009 }
1010
1011 static void
1012 ae_rxvlan(struct ae_softc *sc)
1013 {
1014 struct ifnet *ifp = &sc->arpcom.ac_if;
1015 uint32_t val;
1016
1017 val = AE_READ_4(sc, AE_MAC_REG);
1018 val &= ~AE_MAC_RMVLAN_EN;
1019 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1020 val |= AE_MAC_RMVLAN_EN;
1021 AE_WRITE_4(sc, AE_MAC_REG, val);
1022 }
1023
1024 static void
1025 ae_rxfilter(struct ae_softc *sc)
1026 {
1027 struct ifnet *ifp = &sc->arpcom.ac_if;
1028 struct ifmultiaddr *ifma;
1029 uint32_t crc;
1030 uint32_t mchash[2];
1031 uint32_t rxcfg;
1032
1033 rxcfg = AE_READ_4(sc, AE_MAC_REG);
1034 rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
1035 rxcfg |= AE_MAC_BCAST_EN;
1036 if (ifp->if_flags & IFF_PROMISC)
1037 rxcfg |= AE_MAC_PROMISC_EN;
1038 if (ifp->if_flags & IFF_ALLMULTI)
1039 rxcfg |= AE_MAC_MCAST_EN;
1040
1041 /*
1042 * Wipe old settings.
1043 */
1044 AE_WRITE_4(sc, AE_REG_MHT0, 0);
1045 AE_WRITE_4(sc, AE_REG_MHT1, 0);
1046 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1047 AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
1048 AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
1049 AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
1050 return;
1051 }
1052
1053 /*
1054 * Load multicast tables.
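 * Bit 31 of the little-endian CRC of each address selects MHT0/MHT1,
 * and bits 30:26 select the bit within that register.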
1055 */
1056 bzero(mchash, sizeof(mchash));
1057 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1058 if (ifma->ifma_addr->sa_family != AF_LINK)
1059 continue;
1060 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1061 ifma->ifma_addr), ETHER_ADDR_LEN);
1062 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
1063 }
1064 AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
1065 AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
1066 AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
1067 }
1068
1069 static unsigned int
1070 ae_tx_avail_size(struct ae_softc *sc)
1071 {
1072 unsigned int avail;
1073
1074 if (sc->txd_cur >= sc->txd_ack)
1075 avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
1076 else
1077 avail = sc->txd_ack - sc->txd_cur;
1078 return (avail - 4); /* 4-byte header. */
1079 }
1080
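/*
 * The L2 Tx path is a contiguous byte ring rather than a descriptor ring:
 * ae_encap() writes a 4-byte ae_txd header at txd_cur, copies the frame
 * right behind it (wrapping at the end of the buffer) and rounds the
 * position up to a 4-byte boundary.  Completions are reported separately
 * through the TxS status ring.
 */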
1081 static int
1082 ae_encap(struct ae_softc *sc, struct mbuf **m_head)
1083 {
1084 struct mbuf *m0;
1085 struct ae_txd *hdr;
1086 unsigned int to_end;
1087 uint16_t len;
1088
1089 M_ASSERTPKTHDR((*m_head));
1090 m0 = *m_head;
1091 len = m0->m_pkthdr.len;
1092 if ((sc->ae_flags & AE_FLAG_TXAVAIL) == 0 ||
1093 ae_tx_avail_size(sc) < len) {
1094 #ifdef AE_DEBUG
1095 if_printf(&sc->arpcom.ac_if, "No free Tx available.\n");
1096 #endif
1097 return ENOBUFS;
1098 }
1099
1100 hdr = (struct ae_txd *)(sc->txd_base + sc->txd_cur);
1101 bzero(hdr, sizeof(*hdr));
1102
1103 /* Header size. */
1104 sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT;
1105
1106 /* Space available to the end of the ring */
1107 to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
1108
1109 if (to_end >= len) {
1110 m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
1111 } else {
1112 m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
1113 sc->txd_cur));
1114 m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
1115 }
1116
1117 /*
1118 * Set TxD flags and parameters.
1119 */
1120 if ((m0->m_flags & M_VLANTAG) != 0) {
1121 hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vlantag));
1122 hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
1123 } else {
1124 hdr->len = htole16(len);
1125 }
1126
1127 /*
1128 * Set current TxD position and round up to a 4-byte boundary.
1129 */
1130 sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
1131 if (sc->txd_cur == sc->txd_ack)
1132 sc->ae_flags &= ~AE_FLAG_TXAVAIL;
1133 #ifdef AE_DEBUG
1134 if_printf(&sc->arpcom.ac_if, "New txd_cur = %d.\n", sc->txd_cur);
1135 #endif
1136
1137 /*
1138 * Update TxS position and check if there are empty TxS available.
1139 */
1140 sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
1141 sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
1142 if (sc->txs_cur == sc->txs_ack)
1143 sc->ae_flags &= ~AE_FLAG_TXAVAIL;
1144
1145 /*
1146 * Synchronize DMA memory.
1147 */
1148 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
1149 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);
1150
1151 return (0);
1152 }
1153
1154 static void
1155 ae_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1156 {
1157 struct ae_softc *sc = ifp->if_softc;
1158 int error, trans;
1159
1160 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1161 ASSERT_SERIALIZED(ifp->if_serializer);
1162
1163 #ifdef AE_DEBUG
1164 if_printf(ifp, "Start called.\n");
1165 #endif
1166 if ((sc->ae_flags & AE_FLAG_LINK) == 0) {
1167 ifq_purge(&ifp->if_snd);
1168 return;
1169 }
1170 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
1171 return;
1172
1173 trans = 0;
1174 while (!ifq_is_empty(&ifp->if_snd)) {
1175 struct mbuf *m0;
1176
1177 m0 = ifq_dequeue(&ifp->if_snd);
1178 if (m0 == NULL)
1179 break; /* Nothing to do. */
1180
1181 error = ae_encap(sc, &m0);
1182 if (error != 0) {
1183 if (m0 != NULL) {
1184 ifq_prepend(&ifp->if_snd, m0);
1185 ifq_set_oactive(&ifp->if_snd);
1186 #ifdef AE_DEBUG
1187 if_printf(ifp, "Setting OACTIVE.\n");
1188 #endif
1189 }
1190 break;
1191 }
1192 trans++;
1193 sc->tx_inproc++;
1194
1195 /* Bounce a copy of the frame to BPF. */
1196 ETHER_BPF_MTAP(ifp, m0);
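/* ae_encap() already copied the frame into the Tx buffer. */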
1197 m_freem(m0);
1198 }
1199 if (trans) { /* Something was dequeued. */
1200 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
1201 ifp->if_timer = AE_TX_TIMEOUT; /* Load watchdog. */
1202 #ifdef AE_DEBUG
1203 if_printf(ifp, "%d packets dequeued.\n", count);
1204 if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
1205 #endif
1206 }
1207 }
1208
1209 static int
1210 ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1211 {
1212 struct ae_softc *sc = ifp->if_softc;
1213 struct ifreq *ifr;
1214 struct mii_data *mii;
1215 int error = 0, mask;
1216
1217 ASSERT_SERIALIZED(ifp->if_serializer);
1218
1219 ifr = (struct ifreq *)data;
1220 switch (cmd) {
1221 case SIOCSIFFLAGS:
1222 if (ifp->if_flags & IFF_UP) {
1223 if (ifp->if_flags & IFF_RUNNING) {
1224 if (((ifp->if_flags ^ sc->ae_if_flags)
1225 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1226 ae_rxfilter(sc);
1227 } else {
1228 ae_init(sc);
1229 }
1230 } else {
1231 if (ifp->if_flags & IFF_RUNNING)
1232 ae_stop(sc);
1233 }
1234 sc->ae_if_flags = ifp->if_flags;
1235 break;
1236
1237 case SIOCADDMULTI:
1238 case SIOCDELMULTI:
1239 if (ifp->if_flags & IFF_RUNNING)
1240 ae_rxfilter(sc);
1241 break;
1242
1243 case SIOCSIFMEDIA:
1244 case SIOCGIFMEDIA:
1245 mii = device_get_softc(sc->ae_miibus);
1246 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1247 break;
1248
1249 case SIOCSIFCAP:
1250 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1251 if (mask & IFCAP_VLAN_HWTAGGING) {
1252 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1253 ae_rxvlan(sc);
1254 }
1255 break;
1256
1257 default:
1258 error = ether_ioctl(ifp, cmd, data);
1259 break;
1260 }
1261 return (error);
1262 }
1263
1264 static int
1265 ae_attach(device_t dev)
1266 {
1267 struct ae_softc *sc = device_get_softc(dev);
1268 struct ifnet *ifp = &sc->arpcom.ac_if;
1269 int error = 0;
1270
1271 sc->ae_dev = dev;
1272 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1273 callout_init(&sc->ae_tick_ch);
1274
1275 /* Enable bus mastering */
1276 pci_enable_busmaster(dev);
1277
1278 /*
1279 * Allocate memory mapped IO
1280 */
1281 sc->ae_mem_rid = PCIR_BAR(0);
1282 sc->ae_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1283 &sc->ae_mem_rid, RF_ACTIVE);
1284 if (sc->ae_mem_res == NULL) {
1285 device_printf(dev, "can't allocate IO memory\n");
1286 return ENXIO;
1287 }
1288 sc->ae_mem_bt = rman_get_bustag(sc->ae_mem_res);
1289 sc->ae_mem_bh = rman_get_bushandle(sc->ae_mem_res);
1290
1291 /*
1292 * Allocate IRQ
1293 */
1294 sc->ae_irq_rid = 0;
1295 sc->ae_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1296 &sc->ae_irq_rid,
1297 RF_SHAREABLE | RF_ACTIVE);
1298 if (sc->ae_irq_res == NULL) {
1299 device_printf(dev, "can't allocate irq\n");
1300 error = ENXIO;
1301 goto fail;
1302 }
1303
1304 /* Set PHY address. */
1305 sc->ae_phyaddr = AE_PHYADDR_DEFAULT;
1306
1307 /* Create sysctl tree */
1308 ae_sysctl_node(sc);
1309
1310 /* Reset PHY. */
1311 ae_phy_reset(sc);
1312
1313 /*
1314 * Reset the ethernet controller.
1315 */
1316 ae_reset(sc);
1317 ae_pcie_init(sc);
1318
1319 /*
1320 * Get PCI and chip id/revision.
1321 */
1322 sc->ae_rev = pci_get_revid(dev);
1323 sc->ae_chip_rev =
1324 (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
1325 AE_MASTER_REVNUM_MASK;
1326 if (bootverbose) {
1327 device_printf(dev, "PCI device revision : 0x%04x\n", sc->ae_rev);
1328 device_printf(dev, "Chip id/revision : 0x%04x\n",
1329 sc->ae_chip_rev);
1330 }
1331
1332 /*
1333 * XXX
1334 * Uninitialized hardware returns an invalid chip id/revision
1335 * as well as 0xFFFFFFFF for the Tx/Rx FIFO length. It seems that
1336 * an unplugged cable puts the hardware into an automatic
1337 * power-down mode, which in turn returns an invalid chip revision.
1338 */
1339 if (sc->ae_chip_rev == 0xFFFF) {
1340 device_printf(dev,"invalid chip revision : 0x%04x -- "
1341 "not initialized?\n", sc->ae_chip_rev);
1342 error = ENXIO;
1343 goto fail;
1344 }
1345 #if 0
1346 /* Get DMA parameters from PCIe device control register. */
1347 pcie_ptr = pci_get_pciecap_ptr(dev);
1348 if (pcie_ptr) {
1349 uint16_t devctl;
1350 sc->ae_flags |= AE_FLAG_PCIE;
1351 devctl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
1352 /* Max read request size. */
1353 sc->ae_dma_rd_burst = ((devctl >> 12) & 0x07) <<
1354 DMA_CFG_RD_BURST_SHIFT;
1355 /* Max payload size. */
1356 sc->ae_dma_wr_burst = ((devctl >> 5) & 0x07) <<
1357 DMA_CFG_WR_BURST_SHIFT;
1358 if (bootverbose) {
1359 device_printf(dev, "Read request size : %d bytes.\n",
1360 128 << ((devctl >> 12) & 0x07));
1361 device_printf(dev, "TLP payload size : %d bytes.\n",
1362 128 << ((devctl >> 5) & 0x07));
1363 }
1364 } else {
1365 sc->ae_dma_rd_burst = DMA_CFG_RD_BURST_128;
1366 sc->ae_dma_wr_burst = DMA_CFG_WR_BURST_128;
1367 }
1368 #endif
1369
1370 /* Create DMA stuffs */
1371 error = ae_dma_alloc(sc);
1372 if (error)
1373 goto fail;
1374
1375 /* Load station address. */
1376 ae_get_eaddr(sc);
1377
1378 ifp->if_softc = sc;
1379 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1380 ifp->if_ioctl = ae_ioctl;
1381 ifp->if_start = ae_start;
1382 ifp->if_init = ae_init;
1383 ifp->if_watchdog = ae_watchdog;
1384 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN - 1);
1385 ifq_set_ready(&ifp->if_snd);
1386 ifp->if_capabilities = IFCAP_VLAN_MTU |
1387 IFCAP_VLAN_HWTAGGING;
1388 ifp->if_hwassist = 0;
1389 ifp->if_capenable = ifp->if_capabilities;
1390
1391 /* Set up MII bus. */
1392 error = mii_phy_probe(dev, &sc->ae_miibus,
1393 ae_mediachange, ae_mediastatus);
1394 if (error) {
1395 device_printf(dev, "no PHY found!\n");
1396 goto fail;
1397 }
1398 ether_ifattach(ifp, sc->ae_eaddr, NULL);
1399
1400 /* Tell the upper layer(s) we support long frames. */
1401 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1402
1403 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->ae_irq_res));
1404
1405 error = bus_setup_intr(dev, sc->ae_irq_res, INTR_MPSAFE, ae_intr, sc,
1406 &sc->ae_irq_handle, ifp->if_serializer);
1407 if (error) {
1408 device_printf(dev, "could not set up interrupt handler.\n");
1409 ether_ifdetach(ifp);
1410 goto fail;
1411 }
1412 return 0;
1413 fail:
1414 ae_detach(dev);
1415 return (error);
1416 }
1417
1418 static int
1419 ae_detach(device_t dev)
1420 {
1421 struct ae_softc *sc = device_get_softc(dev);
1422
1423 if (device_is_attached(dev)) {
1424 struct ifnet *ifp = &sc->arpcom.ac_if;
1425
1426 lwkt_serialize_enter(ifp->if_serializer);
1427 sc->ae_flags |= AE_FLAG_DETACH;
1428 ae_stop(sc);
1429 bus_teardown_intr(dev, sc->ae_irq_res, sc->ae_irq_handle);
1430 lwkt_serialize_exit(ifp->if_serializer);
1431
1432 ether_ifdetach(ifp);
1433 }
1434
1435 if (sc->ae_miibus != NULL)
1436 device_delete_child(dev, sc->ae_miibus);
1437 bus_generic_detach(dev);
1438
1439 if (sc->ae_irq_res != NULL) {
1440 bus_release_resource(dev, SYS_RES_IRQ, sc->ae_irq_rid,
1441 sc->ae_irq_res);
1442 }
1443 if (sc->ae_mem_res != NULL) {
1444 bus_release_resource(dev, SYS_RES_MEMORY, sc->ae_mem_rid,
1445 sc->ae_mem_res);
1446 }
1447
1448 if (sc->ae_sysctl_tree != NULL)
1449 sysctl_ctx_free(&sc->ae_sysctl_ctx);
1450
1451 ae_dma_free(sc);
1452
1453 return (0);
1454 }
1455
1456 static void
1457 ae_dma_free(struct ae_softc *sc)
1458 {
1459 if (sc->dma_txd_tag != NULL) {
1460 bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
1461 bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
1462 sc->dma_txd_map);
1463 bus_dma_tag_destroy(sc->dma_txd_tag);
1464 }
1465 if (sc->dma_txs_tag != NULL) {
1466 bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
1467 bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
1468 sc->dma_txs_map);
1469 bus_dma_tag_destroy(sc->dma_txs_tag);
1470 }
1471 if (sc->dma_rxd_tag != NULL) {
1472 bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
1473 bus_dmamem_free(sc->dma_rxd_tag,
1474 sc->rxd_base_dma, sc->dma_rxd_map);
1475 bus_dma_tag_destroy(sc->dma_rxd_tag);
1476 }
1477 if (sc->dma_parent_tag != NULL)
1478 bus_dma_tag_destroy(sc->dma_parent_tag);
1479 }
1480
1481 static void
1482 ae_pcie_init(struct ae_softc *sc)
1483 {
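/*
 * Program PCIe link defaults (magic values from if_aereg.h, presumably
 * mirroring the vendor driver).
 */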
1484 AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG,
1485 AE_PCIE_LTSSM_TESTMODE_DEFAULT);
1486 AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG,
1487 AE_PCIE_DLL_TX_CTRL_DEFAULT);
1488 }
1489
1490 static void
1491 ae_phy_reset(struct ae_softc *sc)
1492 {
1493 AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
1494 DELAY(1000); /* XXX: pause(9) ? */
1495 }
1496
1497 static int
1498 ae_reset(struct ae_softc *sc)
1499 {
1500 int i;
1501
1502 /*
1503 * Issue a soft reset.
1504 */
1505 AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
1506 bus_space_barrier(sc->ae_mem_bt, sc->ae_mem_bh, AE_MASTER_REG, 4,
1507 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1508
1509 /*
1510 * Wait for reset to complete.
1511 */
1512 for (i = 0; i < AE_RESET_TIMEOUT; i++) {
1513 if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
1514 break;
1515 DELAY(10);
1516 }
1517 if (i == AE_RESET_TIMEOUT) {
1518 device_printf(sc->ae_dev, "reset timeout.\n");
1519 return (ENXIO);
1520 }
1521
1522 /*
1523 * Wait for everything to enter idle state.
1524 */
1525 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1526 if (AE_READ_4(sc, AE_IDLE_REG) == 0)
1527 break;
1528 DELAY(100);
1529 }
1530 if (i == AE_IDLE_TIMEOUT) {
1531 device_printf(sc->ae_dev, "could not enter idle state.\n");
1532 return (ENXIO);
1533 }
1534 return (0);
1535 }
1536
1537 static int
1538 ae_check_eeprom_present(struct ae_softc *sc, int *vpdc)
1539 {
1540 int error;
1541 uint32_t val;
1542
1543 /*
1544 * Not sure why, but Linux does this.
1545 */
1546 val = AE_READ_4(sc, AE_SPICTL_REG);
1547 if ((val & AE_SPICTL_VPD_EN) != 0) {
1548 val &= ~AE_SPICTL_VPD_EN;
1549 AE_WRITE_4(sc, AE_SPICTL_REG, val);
1550 }
1551 error = pci_find_extcap(sc->ae_dev, PCIY_VPD, vpdc);
1552 return (error);
1553 }
1554
1555 static int
1556 ae_vpd_read_word(struct ae_softc *sc, int reg, uint32_t *word)
1557 {
1558 uint32_t val;
1559 int i;
1560
1561 AE_WRITE_4(sc, AE_VPD_DATA_REG, 0); /* Clear register value. */
1562
1563 /*
1564 * VPD registers start at offset 0x100. Read them.
1565 */
1566 val = 0x100 + reg * 4;
1567 AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
1568 AE_VPD_CAP_ADDR_MASK);
1569 for (i = 0; i < AE_VPD_TIMEOUT; i++) {
1570 DELAY(2000);
1571 val = AE_READ_4(sc, AE_VPD_CAP_REG);
1572 if ((val & AE_VPD_CAP_DONE) != 0)
1573 break;
1574 }
1575 if (i == AE_VPD_TIMEOUT) {
1576 device_printf(sc->ae_dev, "timeout reading VPD register %d.\n",
1577 reg);
1578 return (ETIMEDOUT);
1579 }
1580 *word = AE_READ_4(sc, AE_VPD_DATA_REG);
1581 return (0);
1582 }
1583
1584 static int
1585 ae_get_vpd_eaddr(struct ae_softc *sc, uint32_t *eaddr)
1586 {
1587 uint32_t word, reg, val;
1588 int error;
1589 int found;
1590 int vpdc;
1591 int i;
1592
1593 /*
1594 * Check for EEPROM.
1595 */
1596 error = ae_check_eeprom_present(sc, &vpdc);
1597 if (error != 0)
1598 return (error);
1599
1600 /*
1601 * Read the VPD configuration space.
1602 * Each register is prefixed with signature,
1603 * so we can check if it is valid.
1604 */
1605 for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
1606 error = ae_vpd_read_word(sc, i, &word);
1607 if (error != 0)
1608 break;
1609
1610 /*
1611 * Check signature.
1612 */
1613 if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
1614 break;
1615 reg = word >> AE_VPD_REG_SHIFT;
1616 i++; /* Move to the next word. */
1617 if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
1618 continue;
1619
1620 error = ae_vpd_read_word(sc, i, &val);
1621 if (error != 0)
1622 break;
1623 if (reg == AE_EADDR0_REG)
1624 eaddr[0] = val;
1625 else
1626 eaddr[1] = val;
1627 found++;
1628 }
1629 if (found < 2)
1630 return (ENOENT);
1631
1632 eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1633 if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1634 if (bootverbose)
1635 device_printf(sc->ae_dev,
1636 "VPD ethernet address registers are invalid.\n");
1637 return (EINVAL);
1638 }
1639 return (0);
1640 }
1641
1642 static int
1643 ae_get_reg_eaddr(struct ae_softc *sc, uint32_t *eaddr)
1644 {
1645 /*
1646 * BIOS is supposed to set this.
1647 */
1648 eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
1649 eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
1650 eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1651 if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1652 if (bootverbose)
1653 device_printf(sc->ae_dev,
1654 "Ethetnet address registers are invalid.\n");
1655 return (EINVAL);
1656 }
1657 return (0);
1658 }
1659
1660 static void
1661 ae_get_eaddr(struct ae_softc *sc)
1662 {
1663 uint32_t eaddr[2] = {0, 0};
1664 int error;
1665
1666 /*
1667 * Check for EEPROM.
1668 */
1669 error = ae_get_vpd_eaddr(sc, eaddr);
1670 if (error)
1671 error = ae_get_reg_eaddr(sc, eaddr);
1672 if (error) {
1673 if (bootverbose)
1674 device_printf(sc->ae_dev,
1675 "Generating random ethernet address.\n");
1676 eaddr[0] = karc4random();
1677 /*
1678 * Set OUI to ASUSTek COMPUTER INC.
1679 */
1680 sc->ae_eaddr[0] = 0x02; /* U/L bit set. */
1681 sc->ae_eaddr[1] = 0x1f;
1682 sc->ae_eaddr[2] = 0xc6;
1683 sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
1684 sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
1685 sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
1686 } else {
1687 sc->ae_eaddr[0] = (eaddr[1] >> 8) & 0xff;
1688 sc->ae_eaddr[1] = (eaddr[1] >> 0) & 0xff;
1689 sc->ae_eaddr[2] = (eaddr[0] >> 24) & 0xff;
1690 sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
1691 sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
1692 sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
1693 }
1694 }
1695
1696 static int
1697 ae_mediachange(struct ifnet *ifp)
1698 {
1699 struct ae_softc *sc = ifp->if_softc;
1700 struct mii_data *mii = device_get_softc(sc->ae_miibus);
1701 int error;
1702
1703 ASSERT_SERIALIZED(ifp->if_serializer);
1704 if (mii->mii_instance != 0) {
1705 struct mii_softc *miisc;
1706 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1707 mii_phy_reset(miisc);
1708 }
1709 error = mii_mediachg(mii);
1710 return (error);
1711 }
1712
1713 static void
1714 ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1715 {
1716 struct ae_softc *sc = ifp->if_softc;
1717 struct mii_data *mii = device_get_softc(sc->ae_miibus);
1718
1719 ASSERT_SERIALIZED(ifp->if_serializer);
1720 mii_pollstat(mii);
1721 ifmr->ifm_status = mii->mii_media_status;
1722 ifmr->ifm_active = mii->mii_media_active;
1723 }
1724
1725 static void
1726 ae_update_stats_tx(uint16_t flags, struct ae_stats *stats)
1727 {
1728 if ((flags & AE_TXS_BCAST) != 0)
1729 stats->tx_bcast++;
1730 if ((flags & AE_TXS_MCAST) != 0)
1731 stats->tx_mcast++;
1732 if ((flags & AE_TXS_PAUSE) != 0)
1733 stats->tx_pause++;
1734 if ((flags & AE_TXS_CTRL) != 0)
1735 stats->tx_ctrl++;
1736 if ((flags & AE_TXS_DEFER) != 0)
1737 stats->tx_defer++;
1738 if ((flags & AE_TXS_EXCDEFER) != 0)
1739 stats->tx_excdefer++;
1740 if ((flags & AE_TXS_SINGLECOL) != 0)
1741 stats->tx_singlecol++;
1742 if ((flags & AE_TXS_MULTICOL) != 0)
1743 stats->tx_multicol++;
1744 if ((flags & AE_TXS_LATECOL) != 0)
1745 stats->tx_latecol++;
1746 if ((flags & AE_TXS_ABORTCOL) != 0)
1747 stats->tx_abortcol++;
1748 if ((flags & AE_TXS_UNDERRUN) != 0)
1749 stats->tx_underrun++;
1750 }
1751
1752 static void
1753 ae_update_stats_rx(uint16_t flags, struct ae_stats *stats)
1754 {
1755 if ((flags & AE_RXD_BCAST) != 0)
1756 stats->rx_bcast++;
1757 if ((flags & AE_RXD_MCAST) != 0)
1758 stats->rx_mcast++;
1759 if ((flags & AE_RXD_PAUSE) != 0)
1760 stats->rx_pause++;
1761 if ((flags & AE_RXD_CTRL) != 0)
1762 stats->rx_ctrl++;
1763 if ((flags & AE_RXD_CRCERR) != 0)
1764 stats->rx_crcerr++;
1765 if ((flags & AE_RXD_CODEERR) != 0)
1766 stats->rx_codeerr++;
1767 if ((flags & AE_RXD_RUNT) != 0)
1768 stats->rx_runt++;
1769 if ((flags & AE_RXD_FRAG) != 0)
1770 stats->rx_frag++;
1771 if ((flags & AE_RXD_TRUNC) != 0)
1772 stats->rx_trunc++;
1773 if ((flags & AE_RXD_ALIGN) != 0)
1774 stats->rx_align++;
1775 }
1776
1777 static int
1778 ae_resume(device_t dev)
1779 {
1780 struct ae_softc *sc = device_get_softc(dev);
1781 struct ifnet *ifp = &sc->arpcom.ac_if;
1782
1783 lwkt_serialize_enter(ifp->if_serializer);
1784 #if 0
1785 AE_READ_4(sc, AE_WOL_REG); /* Clear WOL status. */
1786 #endif
1787 ae_phy_reset(sc);
1788 if ((ifp->if_flags & IFF_UP) != 0)
1789 ae_init(sc);
1790 lwkt_serialize_exit(ifp->if_serializer);
1791 return (0);
1792 }
1793
1794 static int
1795 ae_suspend(device_t dev)
1796 {
1797 struct ae_softc *sc = device_get_softc(dev);
1798 struct ifnet *ifp = &sc->arpcom.ac_if;
1799
1800 lwkt_serialize_enter(ifp->if_serializer);
1801 ae_stop(sc);
1802 #if 0
1803 /* we don't use ae_pm_init because we don't want WOL */
1804 ae_pm_init(sc);
1805 #endif
1806 lwkt_serialize_exit(ifp->if_serializer);
1807 return (0);
1808 }
1809
1810 static int
1811 ae_shutdown(device_t dev)
1812 {
1813 struct ae_softc *sc = device_get_softc(dev);
1814 struct ifnet *ifp = &sc->arpcom.ac_if;
1815
1816 ae_suspend(dev);
1817
1818 lwkt_serialize_enter(ifp->if_serializer);
1819 ae_powersave_enable(sc);
1820 lwkt_serialize_exit(ifp->if_serializer);
1821
1822 return (0);
1823 }
1824
1825 static void
1826 ae_powersave_disable(struct ae_softc *sc)
1827 {
1828 uint32_t val;
1829
1830 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1831 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1832 if (val & AE_PHY_DBG_POWERSAVE) {
1833 val &= ~AE_PHY_DBG_POWERSAVE;
1834 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
1835 DELAY(1000);
1836 }
1837 }
1838
1839 static void
1840 ae_powersave_enable(struct ae_softc *sc)
1841 {
1842 uint32_t val;
1843
1844 /*
1845 * XXX magic numbers.
1846 */
1847 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1848 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1849 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
1850 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
1851 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
1852 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
1853 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
1854 }