sys/dev/ae/if_ae.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
28 *
29 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bus.h>
38 #include <sys/endian.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/mutex.h>
44 #include <sys/rman.h>
45 #include <sys/module.h>
46 #include <sys/queue.h>
47 #include <sys/socket.h>
48 #include <sys/sockio.h>
49 #include <sys/sysctl.h>
50 #include <sys/taskqueue.h>
51
52 #include <net/bpf.h>
53 #include <net/if.h>
54 #include <net/if_var.h>
55 #include <net/if_arp.h>
56 #include <net/ethernet.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_types.h>
60 #include <net/if_vlan_var.h>
61
62 #include <netinet/in.h>
63 #include <netinet/in_systm.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66
67 #include <dev/mii/mii.h>
68 #include <dev/mii/miivar.h>
69 #include <dev/pci/pcireg.h>
70 #include <dev/pci/pcivar.h>
71
72 #include <machine/bus.h>
73
74 #include "miibus_if.h"
75
76 #include "if_aereg.h"
77 #include "if_aevar.h"
78
79 /*
80 * Devices supported by this driver.
81 */
82 static struct ae_dev {
83 uint16_t vendorid;
84 uint16_t deviceid;
85 const char *name;
86 } ae_devs[] = {
87 { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
88 "Attansic Technology Corp, L2 FastEthernet" },
89 };
90 #define AE_DEVS_COUNT nitems(ae_devs)
91
92 static struct resource_spec ae_res_spec_mem[] = {
93 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
94 { -1, 0, 0 }
95 };
96 static struct resource_spec ae_res_spec_irq[] = {
97 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
98 { -1, 0, 0 }
99 };
100 static struct resource_spec ae_res_spec_msi[] = {
101 { SYS_RES_IRQ, 1, RF_ACTIVE },
102 { -1, 0, 0 }
103 };
104
105 static int ae_probe(device_t dev);
106 static int ae_attach(device_t dev);
107 static void ae_pcie_init(ae_softc_t *sc);
108 static void ae_phy_reset(ae_softc_t *sc);
109 static void ae_phy_init(ae_softc_t *sc);
110 static int ae_reset(ae_softc_t *sc);
111 static void ae_init(void *arg);
112 static int ae_init_locked(ae_softc_t *sc);
113 static int ae_detach(device_t dev);
114 static int ae_miibus_readreg(device_t dev, int phy, int reg);
115 static int ae_miibus_writereg(device_t dev, int phy, int reg, int val);
116 static void ae_miibus_statchg(device_t dev);
117 static void ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
118 static int ae_mediachange(struct ifnet *ifp);
119 static void ae_retrieve_address(ae_softc_t *sc);
120 static void ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
121 int error);
122 static int ae_alloc_rings(ae_softc_t *sc);
123 static void ae_dma_free(ae_softc_t *sc);
124 static int ae_shutdown(device_t dev);
125 static int ae_suspend(device_t dev);
126 static void ae_powersave_disable(ae_softc_t *sc);
127 static void ae_powersave_enable(ae_softc_t *sc);
128 static int ae_resume(device_t dev);
129 static unsigned int ae_tx_avail_size(ae_softc_t *sc);
130 static int ae_encap(ae_softc_t *sc, struct mbuf **m_head);
131 static void ae_start(struct ifnet *ifp);
132 static void ae_start_locked(struct ifnet *ifp);
133 static void ae_link_task(void *arg, int pending);
134 static void ae_stop_rxmac(ae_softc_t *sc);
135 static void ae_stop_txmac(ae_softc_t *sc);
136 static void ae_mac_config(ae_softc_t *sc);
137 static int ae_intr(void *arg);
138 static void ae_int_task(void *arg, int pending);
139 static void ae_tx_intr(ae_softc_t *sc);
140 static void ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
141 static void ae_rx_intr(ae_softc_t *sc);
142 static void ae_watchdog(ae_softc_t *sc);
143 static void ae_tick(void *arg);
144 static void ae_rxfilter(ae_softc_t *sc);
145 static void ae_rxvlan(ae_softc_t *sc);
146 static int ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
147 static void ae_stop(ae_softc_t *sc);
148 static int ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
149 static int ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
150 static int ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
151 static int ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
152 static void ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
153 static void ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
154 static void ae_init_tunables(ae_softc_t *sc);
155
156 static device_method_t ae_methods[] = {
157 /* Device interface. */
158 DEVMETHOD(device_probe, ae_probe),
159 DEVMETHOD(device_attach, ae_attach),
160 DEVMETHOD(device_detach, ae_detach),
161 DEVMETHOD(device_shutdown, ae_shutdown),
162 DEVMETHOD(device_suspend, ae_suspend),
163 DEVMETHOD(device_resume, ae_resume),
164
165 /* MII interface. */
166 DEVMETHOD(miibus_readreg, ae_miibus_readreg),
167 DEVMETHOD(miibus_writereg, ae_miibus_writereg),
168 DEVMETHOD(miibus_statchg, ae_miibus_statchg),
169 { NULL, NULL }
170 };
171 static driver_t ae_driver = {
172 "ae",
173 ae_methods,
174 sizeof(ae_softc_t)
175 };
176
177 DRIVER_MODULE(ae, pci, ae_driver, 0, 0);
178 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ae, ae_devs,
179 nitems(ae_devs));
180 DRIVER_MODULE(miibus, ae, miibus_driver, 0, 0);
181 MODULE_DEPEND(ae, pci, 1, 1, 1);
182 MODULE_DEPEND(ae, ether, 1, 1, 1);
183 MODULE_DEPEND(ae, miibus, 1, 1, 1);
184
185 /*
186 * Tunables.
187 */
188 static int msi_disable = 0;
189 TUNABLE_INT("hw.ae.msi_disable", &msi_disable);
190
191 #define AE_READ_4(sc, reg) \
192 bus_read_4((sc)->mem[0], (reg))
193 #define AE_READ_2(sc, reg) \
194 bus_read_2((sc)->mem[0], (reg))
195 #define AE_READ_1(sc, reg) \
196 bus_read_1((sc)->mem[0], (reg))
197 #define AE_WRITE_4(sc, reg, val) \
198 bus_write_4((sc)->mem[0], (reg), (val))
199 #define AE_WRITE_2(sc, reg, val) \
200 bus_write_2((sc)->mem[0], (reg), (val))
201 #define AE_WRITE_1(sc, reg, val) \
202 bus_write_1((sc)->mem[0], (reg), (val))
203 #define AE_PHY_READ(sc, reg) \
204 ae_miibus_readreg(sc->dev, 0, reg)
205 #define AE_PHY_WRITE(sc, reg, val) \
206 ae_miibus_writereg(sc->dev, 0, reg, val)
207 #define AE_CHECK_EADDR_VALID(eaddr) \
208 ((eaddr[0] == 0 && eaddr[1] == 0) || \
209 (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
210 #define AE_RXD_VLAN(vtag) \
211 (((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
212 #define AE_TXD_VLAN(vtag) \
213 (((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
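/*
 * A worked example of the swizzle above (illustrative, assuming the TxD
 * result is truncated to 16 bits when stored): for a host-order TCI of
 * 0x3123 (PCP 1, DEI 1, VID 0x123), AE_TXD_VLAN() yields 0x1239, i.e.
 * the VID in the high 12 bits and PCP/DEI in the low nibble, and
 * AE_RXD_VLAN(0x1239) recovers 0x3123.
 */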
214
215 static int
216 ae_probe(device_t dev)
217 {
218 uint16_t deviceid, vendorid;
219 int i;
220
221 vendorid = pci_get_vendor(dev);
222 deviceid = pci_get_device(dev);
223
224 /*
225 * Search through the list of supported devs for matching one.
226 */
227 for (i = 0; i < AE_DEVS_COUNT; i++) {
228 if (vendorid == ae_devs[i].vendorid &&
229 deviceid == ae_devs[i].deviceid) {
230 device_set_desc(dev, ae_devs[i].name);
231 return (BUS_PROBE_DEFAULT);
232 }
233 }
234 return (ENXIO);
235 }
236
237 static int
238 ae_attach(device_t dev)
239 {
240 ae_softc_t *sc;
241 struct ifnet *ifp;
242 uint8_t chiprev;
243 uint32_t pcirev;
244 int nmsi, pmc;
245 int error;
246
247 sc = device_get_softc(dev); /* Automatically allocated and zeroed
248 on attach. */
249 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
250 sc->dev = dev;
251
252 /*
253 * Initialize mutexes and tasks.
254 */
255 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
256 callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
257 TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
258 TASK_INIT(&sc->link_task, 0, ae_link_task, sc);
259
260 pci_enable_busmaster(dev); /* Enable bus mastering. */
261
262 sc->spec_mem = ae_res_spec_mem;
263
264 /*
265 * Allocate memory-mapped registers.
266 */
267 error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
268 if (error != 0) {
269 device_printf(dev, "could not allocate memory resources.\n");
270 sc->spec_mem = NULL;
271 goto fail;
272 }
273
274 /*
275 * Retrieve PCI and chip revisions.
276 */
277 pcirev = pci_get_revid(dev);
278 chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
279 AE_MASTER_REVNUM_MASK;
280 if (bootverbose) {
281 device_printf(dev, "pci device revision: %#04x\n", pcirev);
282 device_printf(dev, "chip id: %#02x\n", chiprev);
283 }
284 nmsi = pci_msi_count(dev);
285 if (bootverbose)
286 device_printf(dev, "MSI count: %d.\n", nmsi);
287
288 /*
289 * Allocate interrupt resources.
290 */
291 if (msi_disable == 0 && nmsi == 1) {
292 error = pci_alloc_msi(dev, &nmsi);
293 if (error == 0) {
294 device_printf(dev, "Using MSI messages.\n");
295 sc->spec_irq = ae_res_spec_msi;
296 error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
297 if (error != 0) {
298 device_printf(dev, "MSI allocation failed.\n");
299 sc->spec_irq = NULL;
300 pci_release_msi(dev);
301 } else {
302 sc->flags |= AE_FLAG_MSI;
303 }
304 }
305 }
306 if (sc->spec_irq == NULL) {
307 sc->spec_irq = ae_res_spec_irq;
308 error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
309 if (error != 0) {
310 device_printf(dev, "could not allocate IRQ resources.\n");
311 sc->spec_irq = NULL;
312 goto fail;
313 }
314 }
315
316 ae_init_tunables(sc);
317
318 ae_phy_reset(sc); /* Reset PHY. */
319 error = ae_reset(sc); /* Reset the controller itself. */
320 if (error != 0)
321 goto fail;
322
323 ae_pcie_init(sc);
324
325 ae_retrieve_address(sc); /* Load MAC address. */
326
327 error = ae_alloc_rings(sc); /* Allocate ring buffers. */
328 if (error != 0)
329 goto fail;
330
331 ifp = sc->ifp = if_alloc(IFT_ETHER);
332 if (ifp == NULL) {
333 device_printf(dev, "could not allocate ifnet structure.\n");
334 error = ENXIO;
335 goto fail;
336 }
337
338 ifp->if_softc = sc;
339 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
340 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
341 ifp->if_ioctl = ae_ioctl;
342 ifp->if_start = ae_start;
343 ifp->if_init = ae_init;
344 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
345 ifp->if_hwassist = 0;
346 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
347 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
348 IFQ_SET_READY(&ifp->if_snd);
349 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
350 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
351 sc->flags |= AE_FLAG_PMG;
352 }
353 ifp->if_capenable = ifp->if_capabilities;
354
355 /*
356 * Configure and attach MII bus.
357 */
358 error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
359 ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
360 MII_OFFSET_ANY, 0);
361 if (error != 0) {
362 device_printf(dev, "attaching PHYs failed\n");
363 goto fail;
364 }
365
366 ether_ifattach(ifp, sc->eaddr);
367 /* Tell the upper layer(s) we support long frames. */
368 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
369
370 /*
371 * Create and run all helper tasks.
372 */
373 sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
374 taskqueue_thread_enqueue, &sc->tq);
375 if (sc->tq == NULL) {
376 device_printf(dev, "could not create taskqueue.\n");
377 ether_ifdetach(ifp);
378 error = ENXIO;
379 goto fail;
380 }
381 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
382 device_get_nameunit(sc->dev));
383
384 /*
385 * Configure interrupt handlers.
386 */
387 error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
388 ae_intr, NULL, sc, &sc->intrhand);
389 if (error != 0) {
390 device_printf(dev, "could not set up interrupt handler.\n");
391 taskqueue_free(sc->tq);
392 sc->tq = NULL;
393 ether_ifdetach(ifp);
394 goto fail;
395 }
396
397 fail:
398 if (error != 0)
399 ae_detach(dev);
400
401 return (error);
402 }
403
404 #define AE_SYSCTL(stx, parent, name, desc, ptr) \
405 	SYSCTL_ADD_UINT(stx, parent, OID_AUTO, name, CTLFLAG_RD, ptr, 0, desc)
406
407 static void
408 ae_init_tunables(ae_softc_t *sc)
409 {
410 struct sysctl_ctx_list *ctx;
411 struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
412 struct ae_stats *ae_stats;
413
414 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
415 ae_stats = &sc->stats;
416
417 ctx = device_get_sysctl_ctx(sc->dev);
418 root = device_get_sysctl_tree(sc->dev);
419 stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
420 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ae statistics");
421
422 /*
423  * Receiver statistics.
424 */
425 stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
426 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
427 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "bcast",
428 "broadcast frames", &ae_stats->rx_bcast);
429 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "mcast",
430 "multicast frames", &ae_stats->rx_mcast);
431 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "pause",
432 "PAUSE frames", &ae_stats->rx_pause);
433 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "control",
434 "control frames", &ae_stats->rx_ctrl);
435 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "crc_errors",
436 "frames with CRC errors", &ae_stats->rx_crcerr);
437 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "code_errors",
438 "frames with invalid opcode", &ae_stats->rx_codeerr);
439 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "runt",
440 "runt frames", &ae_stats->rx_runt);
441 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "frag",
442 "fragmented frames", &ae_stats->rx_frag);
443 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "align_errors",
444 "frames with alignment errors", &ae_stats->rx_align);
445 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "truncated",
446 "frames truncated due to Rx FIFO inderrun", &ae_stats->rx_trunc);
447
448 /*
449  * Transmitter statistics.
450 */
451 stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
452 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
453 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "bcast",
454 "broadcast frames", &ae_stats->tx_bcast);
455 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "mcast",
456 "multicast frames", &ae_stats->tx_mcast);
457 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "pause",
458 "PAUSE frames", &ae_stats->tx_pause);
459 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "control",
460 "control frames", &ae_stats->tx_ctrl);
461 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "defers",
462 	    "deferrals occurred", &ae_stats->tx_defer);
463 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "exc_defers",
464 	    "excessive deferrals occurred", &ae_stats->tx_excdefer);
465 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "singlecols",
466 	    "single collisions occurred", &ae_stats->tx_singlecol);
467 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "multicols",
468 	    "multiple collisions occurred", &ae_stats->tx_multicol);
469 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "latecols",
470 	    "late collisions occurred", &ae_stats->tx_latecol);
471 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "aborts",
472 	    "transmit aborts due to collisions", &ae_stats->tx_abortcol);
473 AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "underruns",
474 "Tx FIFO underruns", &ae_stats->tx_underrun);
475 }
476
477 static void
478 ae_pcie_init(ae_softc_t *sc)
479 {
480
481 AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
482 AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
483 }
484
485 static void
486 ae_phy_reset(ae_softc_t *sc)
487 {
488
489 AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
490 DELAY(1000); /* XXX: pause(9) ? */
491 }
492
493 static int
494 ae_reset(ae_softc_t *sc)
495 {
496 int i;
497
498 /*
499 * Issue a soft reset.
500 */
501 AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
502 bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
503 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
504
505 /*
506 * Wait for reset to complete.
507 */
508 for (i = 0; i < AE_RESET_TIMEOUT; i++) {
509 if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
510 break;
511 DELAY(10);
512 }
513 if (i == AE_RESET_TIMEOUT) {
514 device_printf(sc->dev, "reset timeout.\n");
515 return (ENXIO);
516 }
517
518 /*
519 * Wait for everything to enter idle state.
520 */
521 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
522 if (AE_READ_4(sc, AE_IDLE_REG) == 0)
523 break;
524 DELAY(100);
525 }
526 if (i == AE_IDLE_TIMEOUT) {
527 device_printf(sc->dev, "could not enter idle state.\n");
528 return (ENXIO);
529 }
530 return (0);
531 }
532
533 static void
534 ae_init(void *arg)
535 {
536 ae_softc_t *sc;
537
538 sc = (ae_softc_t *)arg;
539 AE_LOCK(sc);
540 ae_init_locked(sc);
541 AE_UNLOCK(sc);
542 }
543
544 static void
545 ae_phy_init(ae_softc_t *sc)
546 {
547
548 /*
549 * Enable link status change interrupt.
550 * XXX magic numbers.
551 */
552 #ifdef notyet
553 AE_PHY_WRITE(sc, 18, 0xc00);
554 #endif
555 }
556
557 static int
558 ae_init_locked(ae_softc_t *sc)
559 {
560 struct ifnet *ifp;
561 struct mii_data *mii;
562 uint8_t eaddr[ETHER_ADDR_LEN];
563 uint32_t val;
564 bus_addr_t addr;
565
566 AE_LOCK_ASSERT(sc);
567
568 ifp = sc->ifp;
569 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
570 return (0);
571 mii = device_get_softc(sc->miibus);
572
573 ae_stop(sc);
574 ae_reset(sc);
575 ae_pcie_init(sc); /* Initialize PCIE stuff. */
576 ae_phy_init(sc);
577 ae_powersave_disable(sc);
578
579 /*
580 * Clear and disable interrupts.
581 */
582 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
583
584 /*
585 * Set the MAC address.
586 */
587 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
588 val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
589 AE_WRITE_4(sc, AE_EADDR0_REG, val);
590 val = eaddr[0] << 8 | eaddr[1];
591 AE_WRITE_4(sc, AE_EADDR1_REG, val);
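	/*
	 * For example (illustrative): with the address 00:1f:c6:12:34:56,
	 * AE_EADDR0_REG receives 0xc6123456 (bytes 2..5) and AE_EADDR1_REG
	 * receives 0x0000001f (bytes 0..1 in its low word).
	 */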
592
593 bzero(sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING);
594 bzero(sc->txd_base, AE_TXD_BUFSIZE_DEFAULT);
595 bzero(sc->txs_base, AE_TXS_COUNT_DEFAULT * 4);
596 /*
597 * Set ring buffers base addresses.
598 */
599 addr = sc->dma_rxd_busaddr;
600 AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
601 AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
602 addr = sc->dma_txd_busaddr;
603 AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
604 addr = sc->dma_txs_busaddr;
605 AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));
606
607 /*
608 * Configure ring buffers sizes.
609 */
610 AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
611 AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
612 AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);
613
614 /*
615 * Configure interframe gap parameters.
616 */
617 val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
618 AE_IFG_TXIPG_MASK) |
619 ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
620 AE_IFG_RXIPG_MASK) |
621 ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
622 AE_IFG_IPGR1_MASK) |
623 ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
624 AE_IFG_IPGR2_MASK);
625 AE_WRITE_4(sc, AE_IFG_REG, val);
626
627 /*
628 * Configure half-duplex operation.
629 */
630 val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
631 AE_HDPX_LCOL_MASK) |
632 ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
633 AE_HDPX_RETRY_MASK) |
634 ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
635 AE_HDPX_ABEBT_MASK) |
636 ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
637 AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
638 AE_WRITE_4(sc, AE_HDPX_REG, val);
639
640 /*
641 * Configure interrupt moderate timer.
642 */
643 AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
644 val = AE_READ_4(sc, AE_MASTER_REG);
645 val |= AE_MASTER_IMT_EN;
646 AE_WRITE_4(sc, AE_MASTER_REG, val);
647
648 /*
649 * Configure interrupt clearing timer.
650 */
651 AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);
652
653 /*
654 * Configure MTU.
655 */
656 val = ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
657 ETHER_CRC_LEN;
658 AE_WRITE_2(sc, AE_MTU_REG, val);
659
660 /*
661 * Configure cut-through threshold.
662 */
663 AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);
664
665 /*
666 * Configure flow control.
667 */
668 AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
669 AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
670 (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
671 (AE_RXD_COUNT_DEFAULT / 12));
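	/*
	 * In other words, the PAUSE high watermark is 7/8 of the Rx ring
	 * and the low watermark is the larger of AE_RXD_COUNT_MIN / 8 and
	 * AE_RXD_COUNT_DEFAULT / 12.
	 */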
672
673 /*
674 * Init mailboxes.
675 */
676 sc->txd_cur = sc->rxd_cur = 0;
677 sc->txs_ack = sc->txd_ack = 0;
678 sc->rxd_cur = 0;
679 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
680 AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
681
682 sc->tx_inproc = 0; /* Number of packets the chip processes now. */
683 sc->flags |= AE_FLAG_TXAVAIL; /* Free Tx's available. */
684
685 /*
686 * Enable DMA.
687 */
688 AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
689 AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
690
691 /*
692 * Check if everything is OK.
693 */
694 val = AE_READ_4(sc, AE_ISR_REG);
695 if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
696 device_printf(sc->dev, "Initialization failed.\n");
697 return (ENXIO);
698 }
699
700 /*
701 * Clear interrupt status.
702 */
703 AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
704 AE_WRITE_4(sc, AE_ISR_REG, 0x0);
705
706 /*
707 * Enable interrupts.
708 */
709 val = AE_READ_4(sc, AE_MASTER_REG);
710 AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
711 AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);
712
713 /*
714 * Disable WOL.
715 */
716 AE_WRITE_4(sc, AE_WOL_REG, 0);
717
718 /*
719 * Configure MAC.
720 */
721 val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
722 AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
723 AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
724 ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
725 ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
726 AE_MAC_PREAMBLE_MASK);
727 AE_WRITE_4(sc, AE_MAC_REG, val);
728
729 /*
730 * Configure Rx MAC.
731 */
732 ae_rxfilter(sc);
733 ae_rxvlan(sc);
734
735 /*
736 * Enable Tx/Rx.
737 */
738 val = AE_READ_4(sc, AE_MAC_REG);
739 AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);
740
741 sc->flags &= ~AE_FLAG_LINK;
742 mii_mediachg(mii); /* Switch to the current media. */
743
744 callout_reset(&sc->tick_ch, hz, ae_tick, sc);
745
746 ifp->if_drv_flags |= IFF_DRV_RUNNING;
747 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
748
749 #ifdef AE_DEBUG
750 device_printf(sc->dev, "Initialization complete.\n");
751 #endif
752
753 return (0);
754 }
755
756 static int
757 ae_detach(device_t dev)
758 {
759 struct ae_softc *sc;
760 struct ifnet *ifp;
761
762 sc = device_get_softc(dev);
763 KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
764 ifp = sc->ifp;
765 if (device_is_attached(dev)) {
766 AE_LOCK(sc);
767 sc->flags |= AE_FLAG_DETACH;
768 ae_stop(sc);
769 AE_UNLOCK(sc);
770 callout_drain(&sc->tick_ch);
771 taskqueue_drain(sc->tq, &sc->int_task);
772 taskqueue_drain(taskqueue_swi, &sc->link_task);
773 ether_ifdetach(ifp);
774 }
775 if (sc->tq != NULL) {
776 taskqueue_drain(sc->tq, &sc->int_task);
777 taskqueue_free(sc->tq);
778 sc->tq = NULL;
779 }
780 if (sc->miibus != NULL) {
781 device_delete_child(dev, sc->miibus);
782 sc->miibus = NULL;
783 }
784 bus_generic_detach(sc->dev);
785 ae_dma_free(sc);
786 if (sc->intrhand != NULL) {
787 bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
788 sc->intrhand = NULL;
789 }
790 if (ifp != NULL) {
791 if_free(ifp);
792 sc->ifp = NULL;
793 }
794 if (sc->spec_irq != NULL)
795 bus_release_resources(dev, sc->spec_irq, sc->irq);
796 if (sc->spec_mem != NULL)
797 bus_release_resources(dev, sc->spec_mem, sc->mem);
798 if ((sc->flags & AE_FLAG_MSI) != 0)
799 pci_release_msi(dev);
800 mtx_destroy(&sc->mtx);
801
802 return (0);
803 }
804
805 static int
806 ae_miibus_readreg(device_t dev, int phy, int reg)
807 {
808 ae_softc_t *sc;
809 uint32_t val;
810 int i;
811
812 sc = device_get_softc(dev);
813 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
814
815 /*
816 * Locking is done in upper layers.
817 */
818
819 val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
820 AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
821 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
822 AE_WRITE_4(sc, AE_MDIO_REG, val);
823
824 /*
825 * Wait for operation to complete.
826 */
827 for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
828 DELAY(2);
829 val = AE_READ_4(sc, AE_MDIO_REG);
830 if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
831 break;
832 }
833 if (i == AE_MDIO_TIMEOUT) {
834 device_printf(sc->dev, "phy read timeout: %d.\n", reg);
835 return (0);
836 }
837 return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
838 }
839
840 static int
841 ae_miibus_writereg(device_t dev, int phy, int reg, int val)
842 {
843 ae_softc_t *sc;
844 uint32_t aereg;
845 int i;
846
847 sc = device_get_softc(dev);
848 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
849
850 /*
851 * Locking is done in upper layers.
852 */
853
854 aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
855 AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
856 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
857 ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
858 AE_WRITE_4(sc, AE_MDIO_REG, aereg);
859
860 /*
861 * Wait for operation to complete.
862 */
863 for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
864 DELAY(2);
865 aereg = AE_READ_4(sc, AE_MDIO_REG);
866 if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
867 break;
868 }
869 if (i == AE_MDIO_TIMEOUT) {
870 device_printf(sc->dev, "phy write timeout: %d.\n", reg);
871 }
872 return (0);
873 }
874
875 static void
876 ae_miibus_statchg(device_t dev)
877 {
878 ae_softc_t *sc;
879
880 sc = device_get_softc(dev);
881 taskqueue_enqueue(taskqueue_swi, &sc->link_task);
882 }
883
884 static void
885 ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
886 {
887 ae_softc_t *sc;
888 struct mii_data *mii;
889
890 sc = ifp->if_softc;
891 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
892
893 AE_LOCK(sc);
894 mii = device_get_softc(sc->miibus);
895 mii_pollstat(mii);
896 ifmr->ifm_status = mii->mii_media_status;
897 ifmr->ifm_active = mii->mii_media_active;
898 AE_UNLOCK(sc);
899 }
900
901 static int
902 ae_mediachange(struct ifnet *ifp)
903 {
904 ae_softc_t *sc;
905 struct mii_data *mii;
906 struct mii_softc *mii_sc;
907 int error;
908
909 /* XXX: check IFF_UP ?? */
910 sc = ifp->if_softc;
911 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
912 AE_LOCK(sc);
913 mii = device_get_softc(sc->miibus);
914 LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
915 PHY_RESET(mii_sc);
916 error = mii_mediachg(mii);
917 AE_UNLOCK(sc);
918
919 return (error);
920 }
921
922 static int
923 ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
924 {
925 int error;
926 uint32_t val;
927
928 KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));
929
930 /*
931 * Not sure why, but Linux does this.
932 */
933 val = AE_READ_4(sc, AE_SPICTL_REG);
934 if ((val & AE_SPICTL_VPD_EN) != 0) {
935 val &= ~AE_SPICTL_VPD_EN;
936 AE_WRITE_4(sc, AE_SPICTL_REG, val);
937 }
938 error = pci_find_cap(sc->dev, PCIY_VPD, vpdc);
939 return (error);
940 }
941
942 static int
943 ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
944 {
945 uint32_t val;
946 int i;
947
948 AE_WRITE_4(sc, AE_VPD_DATA_REG, 0); /* Clear register value. */
949
950 /*
951 * VPD registers start at offset 0x100. Read them.
952 */
953 val = 0x100 + reg * 4;
954 AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
955 AE_VPD_CAP_ADDR_MASK);
956 for (i = 0; i < AE_VPD_TIMEOUT; i++) {
957 DELAY(2000);
958 val = AE_READ_4(sc, AE_VPD_CAP_REG);
959 if ((val & AE_VPD_CAP_DONE) != 0)
960 break;
961 }
962 if (i == AE_VPD_TIMEOUT) {
963 device_printf(sc->dev, "timeout reading VPD register %d.\n",
964 reg);
965 return (ETIMEDOUT);
966 }
967 *word = AE_READ_4(sc, AE_VPD_DATA_REG);
968 return (0);
969 }
970
971 static int
972 ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
973 {
974 uint32_t word, reg, val;
975 int error;
976 int found;
977 int vpdc;
978 int i;
979
980 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
981 KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));
982
983 /*
984 * Check for EEPROM.
985 */
986 error = ae_check_eeprom_present(sc, &vpdc);
987 if (error != 0)
988 return (error);
989
990 /*
991 * Read the VPD configuration space.
992 * Each register is prefixed with signature,
993 * so we can check if it is valid.
994 */
995 for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
996 error = ae_vpd_read_word(sc, i, &word);
997 if (error != 0)
998 break;
999
1000 /*
1001 * Check signature.
1002 */
1003 if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
1004 break;
1005 reg = word >> AE_VPD_REG_SHIFT;
1006 i++; /* Move to the next word. */
1007
1008 if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
1009 continue;
1010
1011 error = ae_vpd_read_word(sc, i, &val);
1012 if (error != 0)
1013 break;
1014 if (reg == AE_EADDR0_REG)
1015 eaddr[0] = val;
1016 else
1017 eaddr[1] = val;
1018 found++;
1019 }
1020
1021 if (found < 2)
1022 return (ENOENT);
1023
1024 eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1025 if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1026 if (bootverbose)
1027 device_printf(sc->dev,
1028 "VPD ethernet address registers are invalid.\n");
1029 return (EINVAL);
1030 }
1031 return (0);
1032 }
1033
1034 static int
1035 ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
1036 {
1037
1038 /*
1039 * BIOS is supposed to set this.
1040 */
1041 eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
1042 eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
1043 eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1044
1045 if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1046 if (bootverbose)
1047 device_printf(sc->dev,
1048 "Ethernet address registers are invalid.\n");
1049 return (EINVAL);
1050 }
1051 return (0);
1052 }
1053
1054 static void
1055 ae_retrieve_address(ae_softc_t *sc)
1056 {
1057 uint32_t eaddr[2] = {0, 0};
1058 int error;
1059
1060 /*
1061 	 * Check for EEPROM.
1062 */
1063 error = ae_get_vpd_eaddr(sc, eaddr);
1064 if (error != 0)
1065 error = ae_get_reg_eaddr(sc, eaddr);
1066 if (error != 0) {
1067 if (bootverbose)
1068 device_printf(sc->dev,
1069 "Generating random ethernet address.\n");
1070 eaddr[0] = arc4random();
1071
1072 /*
1073 * Set OUI to ASUSTek COMPUTER INC.
1074 */
1075 sc->eaddr[0] = 0x02; /* U/L bit set. */
1076 sc->eaddr[1] = 0x1f;
1077 sc->eaddr[2] = 0xc6;
1078 sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
1079 sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
1080 sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
1081 } else {
1082 sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
1083 sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
1084 sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
1085 sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
1086 sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
1087 sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
1088 }
1089 }
1090
1091 static void
1092 ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1093 {
1094 bus_addr_t *addr = arg;
1095
1096 if (error != 0)
1097 return;
1098 KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
1099 nsegs));
1100 *addr = segs[0].ds_addr;
1101 }
1102
1103 static int
1104 ae_alloc_rings(ae_softc_t *sc)
1105 {
1106 bus_addr_t busaddr;
1107 int error;
1108
1109 /*
1110 * Create parent DMA tag.
1111 */
1112 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1113 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1114 NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
1115 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
1116 &sc->dma_parent_tag);
1117 if (error != 0) {
1118 		device_printf(sc->dev, "could not create parent DMA tag.\n");
1119 return (error);
1120 }
1121
1122 /*
1123 * Create DMA tag for TxD.
1124 */
1125 error = bus_dma_tag_create(sc->dma_parent_tag,
1126 8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1127 NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
1128 AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
1129 &sc->dma_txd_tag);
1130 if (error != 0) {
1131 		device_printf(sc->dev, "could not create TxD DMA tag.\n");
1132 return (error);
1133 }
1134
1135 /*
1136 * Create DMA tag for TxS.
1137 */
1138 error = bus_dma_tag_create(sc->dma_parent_tag,
1139 8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1140 NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
1141 AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
1142 &sc->dma_txs_tag);
1143 if (error != 0) {
1144 		device_printf(sc->dev, "could not create TxS DMA tag.\n");
1145 return (error);
1146 }
1147
1148 /*
1149 * Create DMA tag for RxD.
1150 */
1151 error = bus_dma_tag_create(sc->dma_parent_tag,
1152 128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1153 NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 1,
1154 AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 0, NULL, NULL,
1155 &sc->dma_rxd_tag);
1156 if (error != 0) {
1157 		device_printf(sc->dev, "could not create RxD DMA tag.\n");
1158 return (error);
1159 }
1160
1161 /*
1162 * Allocate TxD DMA memory.
1163 */
1164 error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
1165 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1166 &sc->dma_txd_map);
1167 if (error != 0) {
1168 device_printf(sc->dev,
1169 "could not allocate DMA memory for TxD ring.\n");
1170 return (error);
1171 }
1172 error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
1173 AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1174 if (error != 0 || busaddr == 0) {
1175 device_printf(sc->dev,
1176 "could not load DMA map for TxD ring.\n");
1177 return (error);
1178 }
1179 sc->dma_txd_busaddr = busaddr;
1180
1181 /*
1182 * Allocate TxS DMA memory.
1183 */
1184 error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
1185 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1186 &sc->dma_txs_map);
1187 if (error != 0) {
1188 device_printf(sc->dev,
1189 "could not allocate DMA memory for TxS ring.\n");
1190 return (error);
1191 }
1192 error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
1193 AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1194 if (error != 0 || busaddr == 0) {
1195 device_printf(sc->dev,
1196 "could not load DMA map for TxS ring.\n");
1197 return (error);
1198 }
1199 sc->dma_txs_busaddr = busaddr;
1200
1201 /*
1202 * Allocate RxD DMA memory.
1203 */
1204 error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
1205 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1206 &sc->dma_rxd_map);
1207 if (error != 0) {
1208 device_printf(sc->dev,
1209 "could not allocate DMA memory for RxD ring.\n");
1210 return (error);
1211 }
1212 error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
1213 sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING,
1214 ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1215 if (error != 0 || busaddr == 0) {
1216 device_printf(sc->dev,
1217 "could not load DMA map for RxD ring.\n");
1218 return (error);
1219 }
1220 sc->dma_rxd_busaddr = busaddr + AE_RXD_PADDING;
1221 sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + AE_RXD_PADDING);
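	/*
	 * Note that the ring the chip sees starts AE_RXD_PADDING bytes
	 * into the allocation, so the bus address programmed into the
	 * chip and the KVA pointer used by the driver must be offset
	 * identically.
	 */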
1222
1223 return (0);
1224 }
1225
1226 static void
1227 ae_dma_free(ae_softc_t *sc)
1228 {
1229
1230 if (sc->dma_txd_tag != NULL) {
1231 if (sc->dma_txd_busaddr != 0)
1232 bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
1233 if (sc->txd_base != NULL)
1234 bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
1235 sc->dma_txd_map);
1236 bus_dma_tag_destroy(sc->dma_txd_tag);
1237 sc->dma_txd_tag = NULL;
1238 sc->txd_base = NULL;
1239 sc->dma_txd_busaddr = 0;
1240 }
1241 if (sc->dma_txs_tag != NULL) {
1242 if (sc->dma_txs_busaddr != 0)
1243 bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
1244 if (sc->txs_base != NULL)
1245 bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
1246 sc->dma_txs_map);
1247 bus_dma_tag_destroy(sc->dma_txs_tag);
1248 sc->dma_txs_tag = NULL;
1249 sc->txs_base = NULL;
1250 sc->dma_txs_busaddr = 0;
1251 }
1252 if (sc->dma_rxd_tag != NULL) {
1253 if (sc->dma_rxd_busaddr != 0)
1254 bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
1255 if (sc->rxd_base_dma != NULL)
1256 bus_dmamem_free(sc->dma_rxd_tag, sc->rxd_base_dma,
1257 sc->dma_rxd_map);
1258 bus_dma_tag_destroy(sc->dma_rxd_tag);
1259 sc->dma_rxd_tag = NULL;
1260 sc->rxd_base_dma = NULL;
1261 sc->dma_rxd_busaddr = 0;
1262 }
1263 if (sc->dma_parent_tag != NULL) {
1264 bus_dma_tag_destroy(sc->dma_parent_tag);
1265 sc->dma_parent_tag = NULL;
1266 }
1267 }
1268
1269 static int
1270 ae_shutdown(device_t dev)
1271 {
1272 ae_softc_t *sc;
1273 int error;
1274
1275 sc = device_get_softc(dev);
1276 KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
1277
1278 error = ae_suspend(dev);
1279 AE_LOCK(sc);
1280 ae_powersave_enable(sc);
1281 AE_UNLOCK(sc);
1282 return (error);
1283 }
1284
1285 static void
1286 ae_powersave_disable(ae_softc_t *sc)
1287 {
1288 uint32_t val;
1289
1290 AE_LOCK_ASSERT(sc);
1291
1292 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1293 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1294 if (val & AE_PHY_DBG_POWERSAVE) {
1295 val &= ~AE_PHY_DBG_POWERSAVE;
1296 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
1297 DELAY(1000);
1298 }
1299 }
1300
1301 static void
1302 ae_powersave_enable(ae_softc_t *sc)
1303 {
1304 uint32_t val;
1305
1306 AE_LOCK_ASSERT(sc);
1307
1308 /*
1309 * XXX magic numbers.
1310 */
1311 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1312 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1313 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
1314 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
1315 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
1316 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
1317 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
1318 }
1319
1320 static void
1321 ae_pm_init(ae_softc_t *sc)
1322 {
1323 struct ifnet *ifp;
1324 uint32_t val;
1325 uint16_t pmstat;
1326 struct mii_data *mii;
1327 int pmc;
1328
1329 AE_LOCK_ASSERT(sc);
1330
1331 ifp = sc->ifp;
1332 if ((sc->flags & AE_FLAG_PMG) == 0) {
1333 /* Disable WOL entirely. */
1334 AE_WRITE_4(sc, AE_WOL_REG, 0);
1335 return;
1336 }
1337
1338 /*
1339 * Configure WOL if enabled.
1340 */
1341 if ((ifp->if_capenable & IFCAP_WOL) != 0) {
1342 mii = device_get_softc(sc->miibus);
1343 mii_pollstat(mii);
1344 if ((mii->mii_media_status & IFM_AVALID) != 0 &&
1345 (mii->mii_media_status & IFM_ACTIVE) != 0) {
1346 AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \
1347 AE_WOL_MAGIC_PME);
1348
1349 /*
1350 * Configure MAC.
1351 */
1352 val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \
1353 AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \
1354 ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \
1355 AE_HALFBUF_MASK) | \
1356 ((AE_MAC_PREAMBLE_DEFAULT << \
1357 AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \
1358 AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
1359 if ((IFM_OPTIONS(mii->mii_media_active) & \
1360 IFM_FDX) != 0)
1361 val |= AE_MAC_FULL_DUPLEX;
1362 AE_WRITE_4(sc, AE_MAC_REG, val);
1363
1364 } else { /* No link. */
1365 AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \
1366 AE_WOL_LNKCHG_PME);
1367 AE_WRITE_4(sc, AE_MAC_REG, 0);
1368 }
1369 } else {
1370 ae_powersave_enable(sc);
1371 }
1372
1373 /*
1374 * PCIE hacks. Magic numbers.
1375 */
1376 val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
1377 val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
1378 AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
1379 val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
1380 val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
1381 AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);
1382
1383 /*
1384 * Configure PME.
1385 */
1386 if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
1387 pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
1388 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1389 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1390 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1391 pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1392 }
1393 }
1394
1395 static int
1396 ae_suspend(device_t dev)
1397 {
1398 ae_softc_t *sc;
1399
1400 sc = device_get_softc(dev);
1401
1402 AE_LOCK(sc);
1403 ae_stop(sc);
1404 ae_pm_init(sc);
1405 AE_UNLOCK(sc);
1406
1407 return (0);
1408 }
1409
1410 static int
1411 ae_resume(device_t dev)
1412 {
1413 ae_softc_t *sc;
1414
1415 sc = device_get_softc(dev);
1416 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1417
1418 AE_LOCK(sc);
1419 AE_READ_4(sc, AE_WOL_REG); /* Clear WOL status. */
1420 if ((sc->ifp->if_flags & IFF_UP) != 0)
1421 ae_init_locked(sc);
1422 AE_UNLOCK(sc);
1423
1424 return (0);
1425 }
1426
1427 static unsigned int
1428 ae_tx_avail_size(ae_softc_t *sc)
1429 {
1430 unsigned int avail;
1431
1432 if (sc->txd_cur >= sc->txd_ack)
1433 avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
1434 else
1435 avail = sc->txd_ack - sc->txd_cur;
1436
1437 return (avail);
1438 }
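/*
 * A worked example of the accounting above (illustrative, assuming an
 * 8 KiB descriptor buffer): with txd_cur == 6000 and txd_ack == 2000,
 * the chip still owns bytes [2000, 6000), leaving 8192 - 4000 == 4192
 * bytes free; once txd_cur wraps around to 1000 (txd_ack still 2000),
 * only txd_ack - txd_cur == 1000 bytes remain.
 */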
1439
1440 static int
1441 ae_encap(ae_softc_t *sc, struct mbuf **m_head)
1442 {
1443 struct mbuf *m0;
1444 ae_txd_t *hdr;
1445 unsigned int to_end;
1446 uint16_t len;
1447
1448 AE_LOCK_ASSERT(sc);
1449
1450 m0 = *m_head;
1451 len = m0->m_pkthdr.len;
1452
1453 if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
1454 len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
1455 #ifdef AE_DEBUG
1456 if_printf(sc->ifp, "No free Tx available.\n");
1457 #endif
1458 		return (ENOBUFS);
1459 }
1460
1461 hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
1462 bzero(hdr, sizeof(*hdr));
1463 /* Skip header size. */
1464 sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
1465 /* Space available to the end of the ring */
1466 to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
1467 if (to_end >= len) {
1468 m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
1469 } else {
1470 m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
1471 sc->txd_cur));
1472 m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
1473 }
1474
1475 /*
1476 * Set TxD flags and parameters.
1477 */
1478 if ((m0->m_flags & M_VLANTAG) != 0) {
1479 hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
1480 hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
1481 } else {
1482 hdr->len = htole16(len);
1483 }
1484
1485 /*
1486 * Set current TxD position and round up to a 4-byte boundary.
1487 */
1488 sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
1489 if (sc->txd_cur == sc->txd_ack)
1490 sc->flags &= ~AE_FLAG_TXAVAIL;
1491 #ifdef AE_DEBUG
1492 if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
1493 #endif
1494
1495 /*
1496 * Update TxS position and check if there are empty TxS available.
1497 */
1498 sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
1499 sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
1500 if (sc->txs_cur == sc->txs_ack)
1501 sc->flags &= ~AE_FLAG_TXAVAIL;
1502
1503 /*
1504 * Synchronize DMA memory.
1505 */
1506 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
1507 BUS_DMASYNC_PREWRITE);
1508 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1509 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1510
1511 return (0);
1512 }
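/*
 * Design note: ae_encap() copies each frame into the Tx ring with
 * m_copydata() instead of DMA-mapping the mbuf chain, so the chip
 * consumes a byte stream of (ae_txd_t header, payload) records, each
 * rounded up to a 4-byte boundary, and no per-packet DMA maps need to
 * be tracked.
 */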
1513
1514 static void
1515 ae_start(struct ifnet *ifp)
1516 {
1517 ae_softc_t *sc;
1518
1519 sc = ifp->if_softc;
1520 AE_LOCK(sc);
1521 ae_start_locked(ifp);
1522 AE_UNLOCK(sc);
1523 }
1524
1525 static void
1526 ae_start_locked(struct ifnet *ifp)
1527 {
1528 ae_softc_t *sc;
1529 unsigned int count;
1530 struct mbuf *m0;
1531 int error;
1532
1533 sc = ifp->if_softc;
1534 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1535 AE_LOCK_ASSERT(sc);
1536
1537 #ifdef AE_DEBUG
1538 if_printf(ifp, "Start called.\n");
1539 #endif
1540
1541 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1542 IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
1543 return;
1544
1545 count = 0;
1546 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
1547 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
1548 if (m0 == NULL)
1549 break; /* Nothing to do. */
1550
1551 error = ae_encap(sc, &m0);
1552 if (error != 0) {
1553 if (m0 != NULL) {
1554 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
1555 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1556 #ifdef AE_DEBUG
1557 if_printf(ifp, "Setting OACTIVE.\n");
1558 #endif
1559 }
1560 break;
1561 }
1562 count++;
1563 sc->tx_inproc++;
1564
1565 /* Bounce a copy of the frame to BPF. */
1566 ETHER_BPF_MTAP(ifp, m0);
1567
1568 m_freem(m0);
1569 }
1570
1571 if (count > 0) { /* Something was dequeued. */
1572 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
1573 sc->wd_timer = AE_TX_TIMEOUT; /* Load watchdog. */
1574 #ifdef AE_DEBUG
1575 if_printf(ifp, "%d packets dequeued.\n", count);
1576 if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
1577 #endif
1578 }
1579 }
1580
1581 static void
1582 ae_link_task(void *arg, int pending)
1583 {
1584 ae_softc_t *sc;
1585 struct mii_data *mii;
1586 struct ifnet *ifp;
1587 uint32_t val;
1588
1589 sc = (ae_softc_t *)arg;
1590 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1591 AE_LOCK(sc);
1592
1593 ifp = sc->ifp;
1594 mii = device_get_softc(sc->miibus);
1595 if (mii == NULL || ifp == NULL ||
1596 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1597 AE_UNLOCK(sc); /* XXX: could happen? */
1598 return;
1599 }
1600
1601 sc->flags &= ~AE_FLAG_LINK;
1602 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
1603 (IFM_AVALID | IFM_ACTIVE)) {
1604 switch(IFM_SUBTYPE(mii->mii_media_active)) {
1605 case IFM_10_T:
1606 case IFM_100_TX:
1607 sc->flags |= AE_FLAG_LINK;
1608 break;
1609 default:
1610 break;
1611 }
1612 }
1613
1614 /*
1615 * Stop Rx/Tx MACs.
1616 */
1617 ae_stop_rxmac(sc);
1618 ae_stop_txmac(sc);
1619
1620 if ((sc->flags & AE_FLAG_LINK) != 0) {
1621 ae_mac_config(sc);
1622
1623 /*
1624 * Restart DMA engines.
1625 */
1626 AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
1627 AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
1628
1629 /*
1630 * Enable Rx and Tx MACs.
1631 */
1632 val = AE_READ_4(sc, AE_MAC_REG);
1633 val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
1634 AE_WRITE_4(sc, AE_MAC_REG, val);
1635 }
1636 AE_UNLOCK(sc);
1637 }
1638
1639 static void
1640 ae_stop_rxmac(ae_softc_t *sc)
1641 {
1642 uint32_t val;
1643 int i;
1644
1645 AE_LOCK_ASSERT(sc);
1646
1647 /*
1648 * Stop Rx MAC engine.
1649 */
1650 val = AE_READ_4(sc, AE_MAC_REG);
1651 if ((val & AE_MAC_RX_EN) != 0) {
1652 val &= ~AE_MAC_RX_EN;
1653 AE_WRITE_4(sc, AE_MAC_REG, val);
1654 }
1655
1656 /*
1657 * Stop Rx DMA engine.
1658 */
1659 if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
1660 AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
1661
1662 /*
1663 * Wait for IDLE state.
1664 */
1665 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1666 val = AE_READ_4(sc, AE_IDLE_REG);
1667 if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
1668 break;
1669 DELAY(100);
1670 }
1671 if (i == AE_IDLE_TIMEOUT)
1672 device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
1673 }
1674
1675 static void
1676 ae_stop_txmac(ae_softc_t *sc)
1677 {
1678 uint32_t val;
1679 int i;
1680
1681 AE_LOCK_ASSERT(sc);
1682
1683 /*
1684 * Stop Tx MAC engine.
1685 */
1686 val = AE_READ_4(sc, AE_MAC_REG);
1687 if ((val & AE_MAC_TX_EN) != 0) {
1688 val &= ~AE_MAC_TX_EN;
1689 AE_WRITE_4(sc, AE_MAC_REG, val);
1690 }
1691
1692 /*
1693 * Stop Tx DMA engine.
1694 */
1695 if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
1696 AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
1697
1698 /*
1699 * Wait for IDLE state.
1700 */
1701 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1702 val = AE_READ_4(sc, AE_IDLE_REG);
1703 if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
1704 break;
1705 DELAY(100);
1706 }
1707 if (i == AE_IDLE_TIMEOUT)
1708 device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
1709 }
1710
1711 static void
1712 ae_mac_config(ae_softc_t *sc)
1713 {
1714 struct mii_data *mii;
1715 uint32_t val;
1716
1717 AE_LOCK_ASSERT(sc);
1718
1719 mii = device_get_softc(sc->miibus);
1720 val = AE_READ_4(sc, AE_MAC_REG);
1721 val &= ~AE_MAC_FULL_DUPLEX;
1722 /* XXX disable AE_MAC_TX_FLOW_EN? */
1723
1724 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
1725 val |= AE_MAC_FULL_DUPLEX;
1726
1727 AE_WRITE_4(sc, AE_MAC_REG, val);
1728 }
1729
1730 static int
1731 ae_intr(void *arg)
1732 {
1733 ae_softc_t *sc;
1734 uint32_t val;
1735
1736 sc = (ae_softc_t *)arg;
1737 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1738
1739 val = AE_READ_4(sc, AE_ISR_REG);
1740 if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
1741 return (FILTER_STRAY);
1742
1743 /* Disable interrupts. */
1744 AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
1745
1746 /* Schedule interrupt processing. */
1747 taskqueue_enqueue(sc->tq, &sc->int_task);
1748
1749 return (FILTER_HANDLED);
1750 }
1751
1752 static void
1753 ae_int_task(void *arg, int pending)
1754 {
1755 ae_softc_t *sc;
1756 struct ifnet *ifp;
1757 uint32_t val;
1758
1759 sc = (ae_softc_t *)arg;
1760
1761 AE_LOCK(sc);
1762
1763 ifp = sc->ifp;
1764
1765 val = AE_READ_4(sc, AE_ISR_REG); /* Read interrupt status. */
1766 if (val == 0) {
1767 AE_UNLOCK(sc);
1768 return;
1769 }
1770
1771 /*
1772 * Clear interrupts and disable them.
1773 */
1774 AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);
1775
1776 #ifdef AE_DEBUG
1777 if_printf(ifp, "Interrupt received: 0x%08x\n", val);
1778 #endif
1779
1780 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1781 if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
1782 AE_ISR_PHY_LINKDOWN)) != 0) {
1783 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1784 ae_init_locked(sc);
1785 AE_UNLOCK(sc);
1786 return;
1787 }
1788 if ((val & AE_ISR_TX_EVENT) != 0)
1789 ae_tx_intr(sc);
1790 if ((val & AE_ISR_RX_EVENT) != 0)
1791 ae_rx_intr(sc);
1792 /*
1793 * Re-enable interrupts.
1794 */
1795 AE_WRITE_4(sc, AE_ISR_REG, 0);
1796
1797 if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
1798 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1799 ae_start_locked(ifp);
1800 }
1801 }
1802
1803 AE_UNLOCK(sc);
1804 }
1805
1806 static void
1807 ae_tx_intr(ae_softc_t *sc)
1808 {
1809 struct ifnet *ifp;
1810 ae_txd_t *txd;
1811 ae_txs_t *txs;
1812 uint16_t flags;
1813
1814 AE_LOCK_ASSERT(sc);
1815
1816 ifp = sc->ifp;
1817
1818 #ifdef AE_DEBUG
1819 	if_printf(ifp, "Tx interrupt occurred.\n");
1820 #endif
1821
1822 /*
1823 	 * Synchronize DMA buffers.
1824 */
1825 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
1826 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1827 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1828 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1829
1830 for (;;) {
1831 txs = sc->txs_base + sc->txs_ack;
1832 flags = le16toh(txs->flags);
1833 if ((flags & AE_TXS_UPDATE) == 0)
1834 break;
1835 txs->flags = htole16(flags & ~AE_TXS_UPDATE);
1836 /* Update stats. */
1837 ae_update_stats_tx(flags, &sc->stats);
1838
1839 /*
1840 * Update TxS position.
1841 */
1842 sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
1843 sc->flags |= AE_FLAG_TXAVAIL;
1844
1845 txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
1846 if (txs->len != txd->len)
1847 device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
1848 le16toh(txs->len), le16toh(txd->len));
1849
1850 /*
1851 * Move txd ack and align on 4-byte boundary.
1852 */
1853 sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
1854 sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
1855
1856 if ((flags & AE_TXS_SUCCESS) != 0)
1857 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1858 else
1859 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1860
1861 sc->tx_inproc--;
1862 }
1863
1864 if ((sc->flags & AE_FLAG_TXAVAIL) != 0)
1865 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1866 if (sc->tx_inproc < 0) {
1867 if_printf(ifp, "Received stray Tx interrupt(s).\n");
1868 sc->tx_inproc = 0;
1869 }
1870
1871 if (sc->tx_inproc == 0)
1872 sc->wd_timer = 0; /* Unarm watchdog. */
1873
1874 /*
1875 	 * Synchronize DMA buffers.
1876 */
1877 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
1878 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1879 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1880 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1881 }
1882
1883 static void
1884 ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
1885 {
1886 struct ifnet *ifp;
1887 struct mbuf *m;
1888 unsigned int size;
1889 uint16_t flags;
1890
1891 AE_LOCK_ASSERT(sc);
1892
1893 ifp = sc->ifp;
1894 flags = le16toh(rxd->flags);
1895
1896 #ifdef AE_DEBUG
1897 	if_printf(ifp, "Rx interrupt occurred.\n");
1898 #endif
1899 size = le16toh(rxd->len) - ETHER_CRC_LEN;
1900 if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
1901 		if_printf(ifp, "Runt frame received.\n");
1902 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1903 return;
1904 }
1905
1906 m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
1907 if (m == NULL) {
1908 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1909 return;
1910 }
1911
1912 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
1913 (flags & AE_RXD_HAS_VLAN) != 0) {
1914 m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
1915 m->m_flags |= M_VLANTAG;
1916 }
1917
1918 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1919 /*
1920 * Pass it through.
1921 */
1922 AE_UNLOCK(sc);
1923 (*ifp->if_input)(ifp, m);
1924 AE_LOCK(sc);
1925 }
1926
1927 static void
1928 ae_rx_intr(ae_softc_t *sc)
1929 {
1930 ae_rxd_t *rxd;
1931 struct ifnet *ifp;
1932 uint16_t flags;
1933 int count;
1934
1935 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
1936
1937 AE_LOCK_ASSERT(sc);
1938
1939 ifp = sc->ifp;
1940
1941 /*
1942 	 * Synchronize DMA buffers.
1943 */
1944 bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
1945 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1946
1947 for (count = 0;; count++) {
1948 rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
1949 flags = le16toh(rxd->flags);
1950 if ((flags & AE_RXD_UPDATE) == 0)
1951 break;
1952 rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
1953 /* Update stats. */
1954 ae_update_stats_rx(flags, &sc->stats);
1955
1956 /*
1957 * Update position index.
1958 */
1959 sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;
1960
1961 if ((flags & AE_RXD_SUCCESS) != 0)
1962 ae_rxeof(sc, rxd);
1963 else
1964 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1965 }
1966
1967 if (count > 0) {
1968 bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
1969 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1970 /*
1971 * Update Rx index.
1972 */
1973 AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
1974 }
1975 }
1976
1977 static void
1978 ae_watchdog(ae_softc_t *sc)
1979 {
1980 struct ifnet *ifp;
1981
1982 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
1983 AE_LOCK_ASSERT(sc);
1984 ifp = sc->ifp;
1985
1986 if (sc->wd_timer == 0 || --sc->wd_timer != 0)
1987 		return; /* Nothing to do. */
1988
1989 if ((sc->flags & AE_FLAG_LINK) == 0)
1990 if_printf(ifp, "watchdog timeout (missed link).\n");
1991 else
1992 if_printf(ifp, "watchdog timeout - resetting.\n");
1993
1994 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1995 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1996 ae_init_locked(sc);
1997 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1998 ae_start_locked(ifp);
1999 }
2000
2001 static void
2002 ae_tick(void *arg)
2003 {
2004 ae_softc_t *sc;
2005 struct mii_data *mii;
2006
2007 sc = (ae_softc_t *)arg;
2008 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
2009 AE_LOCK_ASSERT(sc);
2010
2011 mii = device_get_softc(sc->miibus);
2012 mii_tick(mii);
2013 ae_watchdog(sc); /* Watchdog check. */
2014 callout_reset(&sc->tick_ch, hz, ae_tick, sc);
2015 }
2016
2017 static void
2018 ae_rxvlan(ae_softc_t *sc)
2019 {
2020 struct ifnet *ifp;
2021 uint32_t val;
2022
2023 AE_LOCK_ASSERT(sc);
2024 ifp = sc->ifp;
2025 val = AE_READ_4(sc, AE_MAC_REG);
2026 val &= ~AE_MAC_RMVLAN_EN;
2027 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2028 val |= AE_MAC_RMVLAN_EN;
2029 AE_WRITE_4(sc, AE_MAC_REG, val);
2030 }
2031
2032 static u_int
2033 ae_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2034 {
2035 uint32_t crc, *mchash = arg;
2036
2037 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
2038 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2039
2040 return (1);
2041 }
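/*
 * Illustrative example: the top bit of the big-endian CRC selects MHT0
 * or MHT1 and the next five bits select the bit within that word, so a
 * CRC of 0x9c000000 (top six bits 100111) sets bit 7 of MHT1.
 */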
2042
2043 static void
2044 ae_rxfilter(ae_softc_t *sc)
2045 {
2046 struct ifnet *ifp;
2047 uint32_t mchash[2];
2048 uint32_t rxcfg;
2049
2050 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
2051
2052 AE_LOCK_ASSERT(sc);
2053
2054 ifp = sc->ifp;
2055
2056 rxcfg = AE_READ_4(sc, AE_MAC_REG);
2057 rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
2058
2059 if ((ifp->if_flags & IFF_BROADCAST) != 0)
2060 rxcfg |= AE_MAC_BCAST_EN;
2061 if ((ifp->if_flags & IFF_PROMISC) != 0)
2062 rxcfg |= AE_MAC_PROMISC_EN;
2063 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2064 rxcfg |= AE_MAC_MCAST_EN;
2065
2066 /*
2067 * Wipe old settings.
2068 */
2069 AE_WRITE_4(sc, AE_REG_MHT0, 0);
2070 AE_WRITE_4(sc, AE_REG_MHT1, 0);
2071 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2072 AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
2073 AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
2074 AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
2075 return;
2076 }
2077
2078 /*
2079 * Load multicast tables.
2080 */
2081 bzero(mchash, sizeof(mchash));
2082 if_foreach_llmaddr(ifp, ae_hash_maddr, &mchash);
2083 AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
2084 AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
2085 AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
2086 }
2087
2088 static int
2089 ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2090 {
2091 struct ae_softc *sc;
2092 struct ifreq *ifr;
2093 struct mii_data *mii;
2094 int error, mask;
2095
2096 sc = ifp->if_softc;
2097 ifr = (struct ifreq *)data;
2098 error = 0;
2099
2100 switch (cmd) {
2101 case SIOCSIFMTU:
2102 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
2103 error = EINVAL;
2104 else if (ifp->if_mtu != ifr->ifr_mtu) {
2105 AE_LOCK(sc);
2106 ifp->if_mtu = ifr->ifr_mtu;
2107 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2108 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2109 ae_init_locked(sc);
2110 }
2111 AE_UNLOCK(sc);
2112 }
2113 break;
2114 case SIOCSIFFLAGS:
2115 AE_LOCK(sc);
2116 if ((ifp->if_flags & IFF_UP) != 0) {
2117 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2118 if (((ifp->if_flags ^ sc->if_flags)
2119 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2120 ae_rxfilter(sc);
2121 } else {
2122 if ((sc->flags & AE_FLAG_DETACH) == 0)
2123 ae_init_locked(sc);
2124 }
2125 } else {
2126 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2127 ae_stop(sc);
2128 }
2129 sc->if_flags = ifp->if_flags;
2130 AE_UNLOCK(sc);
2131 break;
2132 case SIOCADDMULTI:
2133 case SIOCDELMULTI:
2134 AE_LOCK(sc);
2135 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2136 ae_rxfilter(sc);
2137 AE_UNLOCK(sc);
2138 break;
2139 case SIOCSIFMEDIA:
2140 case SIOCGIFMEDIA:
2141 mii = device_get_softc(sc->miibus);
2142 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2143 break;
2144 case SIOCSIFCAP:
2145 AE_LOCK(sc);
2146 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2147 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2148 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2149 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2150 ae_rxvlan(sc);
2151 }
2152 VLAN_CAPABILITIES(ifp);
2153 AE_UNLOCK(sc);
2154 break;
2155 default:
2156 error = ether_ioctl(ifp, cmd, data);
2157 break;
2158 }
2159 return (error);
2160 }
2161
2162 static void
2163 ae_stop(ae_softc_t *sc)
2164 {
2165 struct ifnet *ifp;
2166 int i;
2167
2168 AE_LOCK_ASSERT(sc);
2169
2170 ifp = sc->ifp;
2171 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2172 sc->flags &= ~AE_FLAG_LINK;
2173 sc->wd_timer = 0; /* Cancel watchdog. */
2174 callout_stop(&sc->tick_ch);
2175
2176 /*
2177 * Clear and disable interrupts.
2178 */
2179 AE_WRITE_4(sc, AE_IMR_REG, 0);
2180 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
2181
2182 /*
2183 * Stop Rx/Tx MACs.
2184 */
2185 ae_stop_txmac(sc);
2186 ae_stop_rxmac(sc);
2187
2188 /*
2189 * Stop DMA engines.
2190 */
2191 AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
2192 AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);
2193
2194 /*
2195 * Wait for everything to enter idle state.
2196 */
2197 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
2198 if (AE_READ_4(sc, AE_IDLE_REG) == 0)
2199 break;
2200 DELAY(100);
2201 }
2202 if (i == AE_IDLE_TIMEOUT)
2203 device_printf(sc->dev, "could not enter idle state in stop.\n");
2204 }
2205
2206 static void
2207 ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
2208 {
2209
2210 if ((flags & AE_TXS_BCAST) != 0)
2211 stats->tx_bcast++;
2212 if ((flags & AE_TXS_MCAST) != 0)
2213 stats->tx_mcast++;
2214 if ((flags & AE_TXS_PAUSE) != 0)
2215 stats->tx_pause++;
2216 if ((flags & AE_TXS_CTRL) != 0)
2217 stats->tx_ctrl++;
2218 if ((flags & AE_TXS_DEFER) != 0)
2219 stats->tx_defer++;
2220 if ((flags & AE_TXS_EXCDEFER) != 0)
2221 stats->tx_excdefer++;
2222 if ((flags & AE_TXS_SINGLECOL) != 0)
2223 stats->tx_singlecol++;
2224 if ((flags & AE_TXS_MULTICOL) != 0)
2225 stats->tx_multicol++;
2226 if ((flags & AE_TXS_LATECOL) != 0)
2227 stats->tx_latecol++;
2228 if ((flags & AE_TXS_ABORTCOL) != 0)
2229 stats->tx_abortcol++;
2230 if ((flags & AE_TXS_UNDERRUN) != 0)
2231 stats->tx_underrun++;
2232 }
2233
2234 static void
2235 ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
2236 {
2237
2238 if ((flags & AE_RXD_BCAST) != 0)
2239 stats->rx_bcast++;
2240 if ((flags & AE_RXD_MCAST) != 0)
2241 stats->rx_mcast++;
2242 if ((flags & AE_RXD_PAUSE) != 0)
2243 stats->rx_pause++;
2244 if ((flags & AE_RXD_CTRL) != 0)
2245 stats->rx_ctrl++;
2246 if ((flags & AE_RXD_CRCERR) != 0)
2247 stats->rx_crcerr++;
2248 if ((flags & AE_RXD_CODEERR) != 0)
2249 stats->rx_codeerr++;
2250 if ((flags & AE_RXD_RUNT) != 0)
2251 stats->rx_runt++;
2252 if ((flags & AE_RXD_FRAG) != 0)
2253 stats->rx_frag++;
2254 if ((flags & AE_RXD_TRUNC) != 0)
2255 stats->rx_trunc++;
2256 if ((flags & AE_RXD_ALIGN) != 0)
2257 stats->rx_align++;
2258 }