The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/ae/if_ae.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   17  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   19  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   20  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   21  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   23  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   24  *
   25  * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
   26  *
   27  * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/8.4/sys/dev/ae/if_ae.c 236946 2012-06-12 07:53:39Z yongari $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/bus.h>
   36 #include <sys/endian.h>
   37 #include <sys/kernel.h>
   38 #include <sys/malloc.h>
   39 #include <sys/mbuf.h>
   40 #include <sys/rman.h>
   41 #include <sys/module.h>
   42 #include <sys/queue.h>
   43 #include <sys/socket.h>
   44 #include <sys/sockio.h>
   45 #include <sys/sysctl.h>
   46 #include <sys/taskqueue.h>
   47 
   48 #include <net/bpf.h>
   49 #include <net/if.h>
   50 #include <net/if_arp.h>
   51 #include <net/ethernet.h>
   52 #include <net/if_dl.h>
   53 #include <net/if_media.h>
   54 #include <net/if_types.h>
   55 #include <net/if_vlan_var.h>
   56 
   57 #include <netinet/in.h>
   58 #include <netinet/in_systm.h>
   59 #include <netinet/ip.h>
   60 #include <netinet/tcp.h>
   61 
   62 #include <dev/mii/mii.h>
   63 #include <dev/mii/miivar.h>
   64 #include <dev/pci/pcireg.h>
   65 #include <dev/pci/pcivar.h>
   66 
   67 #include <machine/bus.h>
   68 
   69 #include "miibus_if.h"
   70 
   71 #include "if_aereg.h"
   72 #include "if_aevar.h"
   73 
   74 /*
   75  * Devices supported by this driver.
   76  */
   77 static struct ae_dev {
   78         uint16_t        vendorid;
   79         uint16_t        deviceid;
   80         const char      *name;
   81 } ae_devs[] = {
   82         { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
   83                 "Attansic Technology Corp, L2 FastEthernet" },
   84 };
   85 #define AE_DEVS_COUNT (sizeof(ae_devs) / sizeof(*ae_devs))
   86 
/*
 * Resource specifications for bus_alloc_resources(): one memory BAR
 * for the register window, and either a shareable legacy INTx line
 * or a single MSI vector (rid 1) for the interrupt.
 */
static struct resource_spec ae_res_spec_mem[] = {
        { SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
        { -1,                   0,              0 }
};
static struct resource_spec ae_res_spec_irq[] = {
        { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
        { -1,                   0,              0 }
};
static struct resource_spec ae_res_spec_msi[] = {
        { SYS_RES_IRQ,          1,              RF_ACTIVE },
        { -1,                   0,              0 }
};
   99 
/*
 * Forward declarations for driver entry points (probe/attach/detach,
 * suspend/resume, MII accessors) and internal helpers.
 */
static int      ae_probe(device_t dev);
static int      ae_attach(device_t dev);
static void     ae_pcie_init(ae_softc_t *sc);
static void     ae_phy_reset(ae_softc_t *sc);
static void     ae_phy_init(ae_softc_t *sc);
static int      ae_reset(ae_softc_t *sc);
static void     ae_init(void *arg);
static int      ae_init_locked(ae_softc_t *sc);
static int      ae_detach(device_t dev);
static int      ae_miibus_readreg(device_t dev, int phy, int reg);
static int      ae_miibus_writereg(device_t dev, int phy, int reg, int val);
static void     ae_miibus_statchg(device_t dev);
static void     ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
static int      ae_mediachange(struct ifnet *ifp);
static void     ae_retrieve_address(ae_softc_t *sc);
static void     ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error);
static int      ae_alloc_rings(ae_softc_t *sc);
static void     ae_dma_free(ae_softc_t *sc);
static int      ae_shutdown(device_t dev);
static int      ae_suspend(device_t dev);
static void     ae_powersave_disable(ae_softc_t *sc);
static void     ae_powersave_enable(ae_softc_t *sc);
static int      ae_resume(device_t dev);
static unsigned int     ae_tx_avail_size(ae_softc_t *sc);
static int      ae_encap(ae_softc_t *sc, struct mbuf **m_head);
static void     ae_start(struct ifnet *ifp);
static void     ae_start_locked(struct ifnet *ifp);
static void     ae_link_task(void *arg, int pending);
static void     ae_stop_rxmac(ae_softc_t *sc);
static void     ae_stop_txmac(ae_softc_t *sc);
static void     ae_mac_config(ae_softc_t *sc);
static int      ae_intr(void *arg);
static void     ae_int_task(void *arg, int pending);
static void     ae_tx_intr(ae_softc_t *sc);
static int      ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
static void     ae_rx_intr(ae_softc_t *sc);
static void     ae_watchdog(ae_softc_t *sc);
static void     ae_tick(void *arg);
static void     ae_rxfilter(ae_softc_t *sc);
static void     ae_rxvlan(ae_softc_t *sc);
static int      ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void     ae_stop(ae_softc_t *sc);
static int      ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
static int      ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
static int      ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static int      ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static void     ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
static void     ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
static void     ae_init_tunables(ae_softc_t *sc);
/*
 * newbus method table: device interface entry points plus the MII
 * register accessors used by the attached miibus child.
 */
static device_method_t ae_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_probe,         ae_probe),
        DEVMETHOD(device_attach,        ae_attach),
        DEVMETHOD(device_detach,        ae_detach),
        DEVMETHOD(device_shutdown,      ae_shutdown),
        DEVMETHOD(device_suspend,       ae_suspend),
        DEVMETHOD(device_resume,        ae_resume),

        /* MII interface. */
        DEVMETHOD(miibus_readreg,       ae_miibus_readreg),
        DEVMETHOD(miibus_writereg,      ae_miibus_writereg),
        DEVMETHOD(miibus_statchg,       ae_miibus_statchg),

        { NULL, NULL }
};
static driver_t ae_driver = {
        "ae",
        ae_methods,
        sizeof(ae_softc_t)
};
static devclass_t ae_devclass;

/* Register the driver on the pci bus and attach miibus beneath it. */
DRIVER_MODULE(ae, pci, ae_driver, ae_devclass, 0, 0);
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ae, pci, 1, 1, 1);
MODULE_DEPEND(ae, ether, 1, 1, 1);
MODULE_DEPEND(ae, miibus, 1, 1, 1);

/*
 * Tunables.
 * hw.ae.msi_disable != 0 forces legacy INTx interrupts in ae_attach().
 */
static int msi_disable = 0;
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);
  185 
/*
 * Register access helpers: all chip registers live in the single
 * memory BAR mapped into sc->mem[0] by ae_attach().
 */
#define AE_READ_4(sc, reg) \
        bus_read_4((sc)->mem[0], (reg))
#define AE_READ_2(sc, reg) \
        bus_read_2((sc)->mem[0], (reg))
#define AE_READ_1(sc, reg) \
        bus_read_1((sc)->mem[0], (reg))
#define AE_WRITE_4(sc, reg, val) \
        bus_write_4((sc)->mem[0], (reg), (val))
#define AE_WRITE_2(sc, reg, val) \
        bus_write_2((sc)->mem[0], (reg), (val))
#define AE_WRITE_1(sc, reg, val) \
        bus_write_1((sc)->mem[0], (reg), (val))
/* PHY access shorthands; the PHY is always addressed at 0 here. */
#define AE_PHY_READ(sc, reg) \
        ae_miibus_readreg(sc->dev, 0, reg)
#define AE_PHY_WRITE(sc, reg, val) \
        ae_miibus_writereg(sc->dev, 0, reg, val)
/* All-zero and all-one station addresses are treated as invalid. */
#define AE_CHECK_EADDR_VALID(eaddr) \
        ((eaddr[0] == 0 && eaddr[1] == 0) || \
        (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
/*
 * Bit-swizzle VLAN tags between host order and the chip's descriptor
 * layout; AE_RXD_VLAN and AE_TXD_VLAN are inverses of each other.
 * NOTE(review): exact field meaning comes from the chip descriptor
 * format — verify against if_aereg.h if modifying.
 */
#define AE_RXD_VLAN(vtag) \
        (((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define AE_TXD_VLAN(vtag) \
        (((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
  209 
  210 /*
  211  * ae statistics.
  212  */
  213 #define STATS_ENTRY(node, desc, field) \
  214     { node, desc, offsetof(struct ae_stats, field) }
  215 struct {
  216         const char      *node;
  217         const char      *desc;
  218         intptr_t        offset;
  219 } ae_stats_tx[] = {
  220         STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
  221         STATS_ENTRY("mcast", "multicast frames", tx_mcast),
  222         STATS_ENTRY("pause", "PAUSE frames", tx_pause),
  223         STATS_ENTRY("control", "control frames", tx_ctrl),
  224         STATS_ENTRY("defers", "deferrals occuried", tx_defer),
  225         STATS_ENTRY("exc_defers", "excessive deferrals occuried", tx_excdefer),
  226         STATS_ENTRY("singlecols", "single collisions occuried", tx_singlecol),
  227         STATS_ENTRY("multicols", "multiple collisions occuried", tx_multicol),
  228         STATS_ENTRY("latecols", "late collisions occuried", tx_latecol),
  229         STATS_ENTRY("aborts", "transmit aborts due collisions", tx_abortcol),
  230         STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
  231 }, ae_stats_rx[] = {
  232         STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
  233         STATS_ENTRY("mcast", "multicast frames", rx_mcast),
  234         STATS_ENTRY("pause", "PAUSE frames", rx_pause),
  235         STATS_ENTRY("control", "control frames", rx_ctrl),
  236         STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
  237         STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
  238         STATS_ENTRY("runt", "runt frames", rx_runt),
  239         STATS_ENTRY("frag", "fragmented frames", rx_frag),
  240         STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
  241         STATS_ENTRY("truncated", "frames truncated due to Rx FIFO inderrun",
  242             rx_trunc)
  243 };
  244 #define AE_STATS_RX_LEN (sizeof(ae_stats_rx) / sizeof(*ae_stats_rx))
  245 #define AE_STATS_TX_LEN (sizeof(ae_stats_tx) / sizeof(*ae_stats_tx))
  246 
  247 static int
  248 ae_probe(device_t dev)
  249 {
  250         uint16_t deviceid, vendorid;
  251         int i;
  252 
  253         vendorid = pci_get_vendor(dev);
  254         deviceid = pci_get_device(dev);
  255 
  256         /*
  257          * Search through the list of supported devs for matching one.
  258          */
  259         for (i = 0; i < AE_DEVS_COUNT; i++) {
  260                 if (vendorid == ae_devs[i].vendorid &&
  261                     deviceid == ae_devs[i].deviceid) {
  262                         device_set_desc(dev, ae_devs[i].name);
  263                         return (BUS_PROBE_DEFAULT);
  264                 }
  265         }
  266         return (ENXIO);
  267 }
  268 
/*
 * Attach the driver to a matched device.
 * Sets up the mutex, callout and deferred tasks, maps the register
 * BAR, allocates the interrupt (MSI when available and not disabled
 * via hw.ae.msi_disable, otherwise legacy INTx), resets the PHY and
 * controller, loads the MAC address, allocates DMA rings, creates
 * and configures the ifnet, attaches the MII bus, starts the
 * taskqueue and installs the interrupt handler.  Any failure jumps
 * to "fail", where ae_detach() unwinds whatever was initialized.
 */
static int
ae_attach(device_t dev)
{
        ae_softc_t *sc;
        struct ifnet *ifp;
        uint8_t chiprev;
        uint32_t pcirev;
        int nmsi, pmc;
        int error;

        sc = device_get_softc(dev); /* Automatically allocated and zeroed
                                       on attach. */
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
        sc->dev = dev;

        /*
         * Initialize mutexes and tasks.
         */
        mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
        callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
        TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
        TASK_INIT(&sc->link_task, 0, ae_link_task, sc);

        pci_enable_busmaster(dev);              /* Enable bus mastering. */

        sc->spec_mem = ae_res_spec_mem;

        /*
         * Allocate memory-mapped registers.
         */
        error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
        if (error != 0) {
                device_printf(dev, "could not allocate memory resources.\n");
                sc->spec_mem = NULL;
                goto fail;
        }

        /*
         * Retrieve PCI and chip revisions.
         */
        pcirev = pci_get_revid(dev);
        chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
            AE_MASTER_REVNUM_MASK;
        if (bootverbose) {
                device_printf(dev, "pci device revision: %#04x\n", pcirev);
                device_printf(dev, "chip id: %#02x\n", chiprev);
        }
        nmsi = pci_msi_count(dev);
        if (bootverbose)
                device_printf(dev, "MSI count: %d.\n", nmsi);

        /*
         * Allocate interrupt resources.
         * Prefer a single MSI message; fall back to a shareable
         * legacy interrupt if MSI allocation fails or is disabled.
         */
        if (msi_disable == 0 && nmsi == 1) {
                error = pci_alloc_msi(dev, &nmsi);
                if (error == 0) {
                        device_printf(dev, "Using MSI messages.\n");
                        sc->spec_irq = ae_res_spec_msi;
                        error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
                        if (error != 0) {
                                device_printf(dev, "MSI allocation failed.\n");
                                sc->spec_irq = NULL;
                                pci_release_msi(dev);
                        } else {
                                sc->flags |= AE_FLAG_MSI;
                        }
                }
        }
        if (sc->spec_irq == NULL) {
                sc->spec_irq = ae_res_spec_irq;
                error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
                if (error != 0) {
                        device_printf(dev, "could not allocate IRQ resources.\n");
                        sc->spec_irq = NULL;
                        goto fail;
                }
        }

        ae_init_tunables(sc);

        ae_phy_reset(sc);               /* Reset PHY. */
        error = ae_reset(sc);           /* Reset the controller itself. */
        if (error != 0)
                goto fail;

        ae_pcie_init(sc);

        ae_retrieve_address(sc);        /* Load MAC address. */

        error = ae_alloc_rings(sc);     /* Allocate ring buffers. */
        if (error != 0)
                goto fail;

        ifp = sc->ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "could not allocate ifnet structure.\n");
                error = ENXIO;
                goto fail;
        }

        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = ae_ioctl;
        ifp->if_start = ae_start;
        ifp->if_init = ae_init;
        ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
        ifp->if_hwassist = 0;
        ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
        IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
        IFQ_SET_READY(&ifp->if_snd);
        /* Advertise magic-packet WOL only if PCI power management exists. */
        if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
                ifp->if_capabilities |= IFCAP_WOL_MAGIC;
                sc->flags |= AE_FLAG_PMG;
        }
        ifp->if_capenable = ifp->if_capabilities;

        /*
         * Configure and attach MII bus.
         */
        error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
            ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
            MII_OFFSET_ANY, 0);
        if (error != 0) {
                device_printf(dev, "attaching PHYs failed\n");
                goto fail;
        }

        ether_ifattach(ifp, sc->eaddr);
        /* Tell the upper layer(s) we support long frames. */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

        /*
         * Create and run all helper tasks.
         */
        sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->tq);
        if (sc->tq == NULL) {
                device_printf(dev, "could not create taskqueue.\n");
                ether_ifdetach(ifp);
                error = ENXIO;
                goto fail;
        }
        taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(sc->dev));

        /*
         * Configure interrupt handlers.
         */
        error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
            ae_intr, NULL, sc, &sc->intrhand);
        if (error != 0) {
                device_printf(dev, "could not set up interrupt handler.\n");
                taskqueue_free(sc->tq);
                sc->tq = NULL;
                ether_ifdetach(ifp);
                goto fail;
        }

fail:
        /* Reached on success too; error == 0 means attach completed. */
        if (error != 0)
                ae_detach(dev);

        return (error);
}
  435 
/*
 * Build the per-device statistics sysctl tree.
 * Creates dev.ae.N.stats with "rx" and "tx" subtrees, adding one
 * read-only unsigned-int leaf per entry of ae_stats_rx[]/ae_stats_tx[],
 * each pointing directly at its counter inside sc->stats.
 */
static void
ae_init_tunables(ae_softc_t *sc)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
        struct ae_stats *ae_stats;
        unsigned int i;

        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
        ae_stats = &sc->stats;

        ctx = device_get_sysctl_ctx(sc->dev);
        root = device_get_sysctl_tree(sc->dev);
        stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
            CTLFLAG_RD, NULL, "ae statistics");

        /*
         * Receiver statistics.
         */
        stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
            CTLFLAG_RD, NULL, "Rx MAC statistics");
        for (i = 0; i < AE_STATS_RX_LEN; i++)
                SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx), OID_AUTO,
                    ae_stats_rx[i].node, CTLFLAG_RD, (char *)ae_stats +
                    ae_stats_rx[i].offset, 0, ae_stats_rx[i].desc);

        /*
         * Transmitter statistics.
         */
        stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
            CTLFLAG_RD, NULL, "Tx MAC statistics");
        for (i = 0; i < AE_STATS_TX_LEN; i++)
                SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx), OID_AUTO,
                    ae_stats_tx[i].node, CTLFLAG_RD, (char *)ae_stats +
                    ae_stats_tx[i].offset, 0, ae_stats_tx[i].desc);
}
  472 
/*
 * Program the PCIe link blocks (LTSSM test mode and data-link-layer
 * Tx control) with their default values.  Called after controller
 * reset from ae_attach() and ae_init_locked().
 */
static void
ae_pcie_init(ae_softc_t *sc)
{

        AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
        AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
}
  480 
/*
 * Reset the PHY by writing the enable bit and busy-waiting 1ms for
 * it to come up.  Runs before the controller soft reset in attach.
 */
static void
ae_phy_reset(ae_softc_t *sc)
{

        AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
        DELAY(1000);    /* XXX: pause(9) ? */
}
  488 
  489 static int
  490 ae_reset(ae_softc_t *sc)
  491 {
  492         int i;
  493 
  494         /*
  495          * Issue a soft reset.
  496          */
  497         AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
  498         bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
  499             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  500         
  501         /*
  502          * Wait for reset to complete.
  503          */
  504         for (i = 0; i < AE_RESET_TIMEOUT; i++) {
  505                 if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
  506                         break;
  507                 DELAY(10);
  508         }
  509         if (i == AE_RESET_TIMEOUT) {
  510                 device_printf(sc->dev, "reset timeout.\n");
  511                 return (ENXIO);
  512         }
  513 
  514         /*
  515          * Wait for everything to enter idle state.
  516          */
  517         for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
  518                 if (AE_READ_4(sc, AE_IDLE_REG) == 0)
  519                         break;
  520                 DELAY(100);
  521         }
  522         if (i == AE_IDLE_TIMEOUT) {
  523                 device_printf(sc->dev, "could not enter idle state.\n");
  524                 return (ENXIO);
  525         }
  526         return (0);
  527 }
  528 
  529 static void
  530 ae_init(void *arg)
  531 {
  532         ae_softc_t *sc;
  533 
  534         sc = (ae_softc_t *)arg;
  535         AE_LOCK(sc);
  536         ae_init_locked(sc);
  537         AE_UNLOCK(sc);
  538 }
  539 
/*
 * PHY initialization hook.  Currently a no-op: the link status change
 * interrupt enable is compiled out under "notyet".
 */
static void
ae_phy_init(ae_softc_t *sc)
{

        /*
         * Enable link status change interrupt.
         * XXX magic numbers.
         */
#ifdef notyet
        AE_PHY_WRITE(sc, 18, 0xc00);
#endif
}
  552 
/*
 * Bring the interface up (softc lock held).
 * Resets and fully reprograms the chip: MAC address, DMA ring base
 * addresses and sizes, inter-frame gap, half-duplex parameters,
 * interrupt moderation, MTU, flow control, mailboxes, DMA engines,
 * interrupts, WOL and the MAC itself, then kicks off autonegotiation
 * and the periodic tick callout.  Returns 0 on success or ENXIO if
 * the chip reports PHY link-down during the sanity check; note the
 * caller (ae_init) ignores the return value.  Register writes are
 * order-sensitive — do not reorder.
 */
static int
ae_init_locked(ae_softc_t *sc)
{
        struct ifnet *ifp;
        struct mii_data *mii;
        uint8_t eaddr[ETHER_ADDR_LEN];
        uint32_t val;
        bus_addr_t addr;

        AE_LOCK_ASSERT(sc);

        ifp = sc->ifp;
        /* Nothing to do if the interface is already running. */
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                return (0);
        mii = device_get_softc(sc->miibus);

        ae_stop(sc);
        ae_reset(sc);
        ae_pcie_init(sc);               /* Initialize PCIE stuff. */
        ae_phy_init(sc);
        ae_powersave_disable(sc);

        /*
         * Clear and disable interrupts.
         */
        AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

        /*
         * Set the MAC address.
         */
        bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
        val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
        AE_WRITE_4(sc, AE_EADDR0_REG, val);
        val = eaddr[0] << 8 | eaddr[1];
        AE_WRITE_4(sc, AE_EADDR1_REG, val);

        /*
         * Set ring buffers base addresses.
         * NOTE(review): only one high-address register is written, so
         * all rings presumably share the same upper 32 bits — confirm
         * against the DMA tag setup in ae_alloc_rings().
         */
        addr = sc->dma_rxd_busaddr;
        AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
        AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
        addr = sc->dma_txd_busaddr;
        AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
        addr = sc->dma_txs_busaddr;
        AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

        /*
         * Configure ring buffers sizes.
         */
        AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
        AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
        AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

        /*
         * Configure interframe gap parameters.
         */
        val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
            AE_IFG_TXIPG_MASK) |
            ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
            AE_IFG_RXIPG_MASK) |
            ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
            AE_IFG_IPGR1_MASK) |
            ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
            AE_IFG_IPGR2_MASK);
        AE_WRITE_4(sc, AE_IFG_REG, val);

        /*
         * Configure half-duplex operation.
         */
        val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
            AE_HDPX_LCOL_MASK) |
            ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
            AE_HDPX_RETRY_MASK) |
            ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
            AE_HDPX_ABEBT_MASK) |
            ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
            AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
        AE_WRITE_4(sc, AE_HDPX_REG, val);

        /*
         * Configure interrupt moderate timer.
         */
        AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
        val = AE_READ_4(sc, AE_MASTER_REG);
        val |= AE_MASTER_IMT_EN;
        AE_WRITE_4(sc, AE_MASTER_REG, val);

        /*
         * Configure interrupt clearing timer.
         */
        AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

        /*
         * Configure MTU.
         */
        val = ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
            ETHER_CRC_LEN;
        AE_WRITE_2(sc, AE_MTU_REG, val);

        /*
         * Configure cut-through threshold.
         */
        AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

        /*
         * Configure flow control.
         */
        AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
        AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
            (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
            (AE_RXD_COUNT_DEFAULT / 12));

        /*
         * Init mailboxes.
         */
        sc->txd_cur = sc->rxd_cur = 0;
        sc->txs_ack = sc->txd_ack = 0;
        sc->rxd_cur = 0;
        AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
        AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);

        sc->tx_inproc = 0;      /* Number of packets the chip processes now. */
        sc->flags |= AE_FLAG_TXAVAIL;   /* Free Tx's available. */

        /*
         * Enable DMA.
         */
        AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
        AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

        /*
         * Check if everything is OK.
         */
        val = AE_READ_4(sc, AE_ISR_REG);
        if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
                device_printf(sc->dev, "Initialization failed.\n");
                return (ENXIO);
        }

        /*
         * Clear interrupt status.
         */
        AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
        AE_WRITE_4(sc, AE_ISR_REG, 0x0);

        /*
         * Enable interrupts.
         */
        val = AE_READ_4(sc, AE_MASTER_REG);
        AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
        AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

        /*
         * Disable WOL.
         */
        AE_WRITE_4(sc, AE_WOL_REG, 0);

        /*
         * Configure MAC.
         */
        val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
            AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
            AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
            ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
            ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
            AE_MAC_PREAMBLE_MASK);
        AE_WRITE_4(sc, AE_MAC_REG, val);

        /*
         * Configure Rx MAC.
         */
        ae_rxfilter(sc);
        ae_rxvlan(sc);

        /*
         * Enable Tx/Rx.
         */
        val = AE_READ_4(sc, AE_MAC_REG);
        AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

        sc->flags &= ~AE_FLAG_LINK;
        mii_mediachg(mii);      /* Switch to the current media. */

        callout_reset(&sc->tick_ch, hz, ae_tick, sc);

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

#ifdef AE_DEBUG
        device_printf(sc->dev, "Initialization complete.\n");
#endif

        return (0);
}
  748 
/*
 * Detach the driver from the device.
 * Stops the interface, drains the callout and deferred tasks, frees
 * the taskqueue, deletes the miibus child, releases DMA areas, the
 * interrupt handler, IRQ/memory resources and MSI, and destroys the
 * mutex.  Also invoked by ae_attach() to unwind a partially
 * completed attach, which is why every teardown step is guarded by
 * a NULL pointer or flag check.  Always returns 0.
 */
static int
ae_detach(device_t dev)
{
        struct ae_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);
        KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
        ifp = sc->ifp;
        if (device_is_attached(dev)) {
                AE_LOCK(sc);
                /* Tell ae_ioctl and the tasks that we are going away. */
                sc->flags |= AE_FLAG_DETACH;
                ae_stop(sc);
                AE_UNLOCK(sc);
                callout_drain(&sc->tick_ch);
                taskqueue_drain(sc->tq, &sc->int_task);
                taskqueue_drain(taskqueue_swi, &sc->link_task);
                ether_ifdetach(ifp);
        }
        if (sc->tq != NULL) {
                taskqueue_drain(sc->tq, &sc->int_task);
                taskqueue_free(sc->tq);
                sc->tq = NULL;
        }
        if (sc->miibus != NULL) {
                device_delete_child(dev, sc->miibus);
                sc->miibus = NULL;
        }
        bus_generic_detach(sc->dev);
        ae_dma_free(sc);
        if (sc->intrhand != NULL) {
                bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
                sc->intrhand = NULL;
        }
        if (ifp != NULL) {
                if_free(ifp);
                sc->ifp = NULL;
        }
        if (sc->spec_irq != NULL)
                bus_release_resources(dev, sc->spec_irq, sc->irq);
        if (sc->spec_mem != NULL)
                bus_release_resources(dev, sc->spec_mem, sc->mem);
        if ((sc->flags & AE_FLAG_MSI) != 0)
                pci_release_msi(dev);
        mtx_destroy(&sc->mtx);

        return (0);
}
  797 
  798 static int
  799 ae_miibus_readreg(device_t dev, int phy, int reg)
  800 {
  801         ae_softc_t *sc;
  802         uint32_t val;
  803         int i;
  804 
  805         sc = device_get_softc(dev);
  806         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
  807 
  808         /*
  809          * Locking is done in upper layers.
  810          */
  811 
  812         val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
  813             AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
  814             ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
  815         AE_WRITE_4(sc, AE_MDIO_REG, val);
  816 
  817         /*
  818          * Wait for operation to complete.
  819          */
  820         for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
  821                 DELAY(2);
  822                 val = AE_READ_4(sc, AE_MDIO_REG);
  823                 if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
  824                         break;
  825         }
  826         if (i == AE_MDIO_TIMEOUT) {
  827                 device_printf(sc->dev, "phy read timeout: %d.\n", reg);
  828                 return (0);
  829         }
  830         return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
  831 }
  832 
  833 static int
  834 ae_miibus_writereg(device_t dev, int phy, int reg, int val)
  835 {
  836         ae_softc_t *sc;
  837         uint32_t aereg;
  838         int i;
  839 
  840         sc = device_get_softc(dev);
  841         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
  842 
  843         /*
  844          * Locking is done in upper layers.
  845          */
  846 
  847         aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
  848             AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
  849             ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
  850             ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
  851         AE_WRITE_4(sc, AE_MDIO_REG, aereg);
  852 
  853         /*
  854          * Wait for operation to complete.
  855          */
  856         for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
  857                 DELAY(2);
  858                 aereg = AE_READ_4(sc, AE_MDIO_REG);
  859                 if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
  860                         break;
  861         }
  862         if (i == AE_MDIO_TIMEOUT) {
  863                 device_printf(sc->dev, "phy write timeout: %d.\n", reg);
  864         }
  865         return (0);
  866 }
  867 
  868 static void
  869 ae_miibus_statchg(device_t dev)
  870 {
  871         ae_softc_t *sc;
  872 
  873         sc = device_get_softc(dev);
  874         taskqueue_enqueue(taskqueue_swi, &sc->link_task);
  875 }
  876 
  877 static void
  878 ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
  879 {
  880         ae_softc_t *sc;
  881         struct mii_data *mii;
  882 
  883         sc = ifp->if_softc;
  884         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
  885 
  886         AE_LOCK(sc);
  887         mii = device_get_softc(sc->miibus);
  888         mii_pollstat(mii);
  889         ifmr->ifm_status = mii->mii_media_status;
  890         ifmr->ifm_active = mii->mii_media_active;
  891         AE_UNLOCK(sc);
  892 }
  893 
  894 static int
  895 ae_mediachange(struct ifnet *ifp)
  896 {
  897         ae_softc_t *sc;
  898         struct mii_data *mii;
  899         struct mii_softc *mii_sc;
  900         int error;
  901 
  902         /* XXX: check IFF_UP ?? */
  903         sc = ifp->if_softc;
  904         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
  905         AE_LOCK(sc);
  906         mii = device_get_softc(sc->miibus);
  907         LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
  908                 mii_phy_reset(mii_sc);
  909         error = mii_mediachg(mii);
  910         AE_UNLOCK(sc);
  911 
  912         return (error);
  913 }
  914 
  915 static int
  916 ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
  917 {
  918         int error;
  919         uint32_t val;
  920 
  921         KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));
  922 
  923         /*
  924          * Not sure why, but Linux does this.
  925          */
  926         val = AE_READ_4(sc, AE_SPICTL_REG);
  927         if ((val & AE_SPICTL_VPD_EN) != 0) {
  928                 val &= ~AE_SPICTL_VPD_EN;
  929                 AE_WRITE_4(sc, AE_SPICTL_REG, val);
  930         }
  931         error = pci_find_extcap(sc->dev, PCIY_VPD, vpdc);
  932         return (error);
  933 }
  934 
  935 static int
  936 ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
  937 {
  938         uint32_t val;
  939         int i;
  940 
  941         AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);     /* Clear register value. */
  942 
  943         /*
  944          * VPD registers start at offset 0x100. Read them.
  945          */
  946         val = 0x100 + reg * 4;
  947         AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
  948             AE_VPD_CAP_ADDR_MASK);
  949         for (i = 0; i < AE_VPD_TIMEOUT; i++) {
  950                 DELAY(2000);
  951                 val = AE_READ_4(sc, AE_VPD_CAP_REG);
  952                 if ((val & AE_VPD_CAP_DONE) != 0)
  953                         break;
  954         }
  955         if (i == AE_VPD_TIMEOUT) {
  956                 device_printf(sc->dev, "timeout reading VPD register %d.\n",
  957                     reg);
  958                 return (ETIMEDOUT);
  959         }
  960         *word = AE_READ_4(sc, AE_VPD_DATA_REG);
  961         return (0);
  962 }
  963 
/*
 * Extract the Ethernet address from the VPD EEPROM.  The VPD area is a
 * sequence of (signature word, data word) pairs; we scan it for the two
 * EADDR register entries.  Fills eaddr[0] (low 4 bytes) and eaddr[1]
 * (high 2 bytes).  Returns 0 on success, ENOENT if both entries were
 * not found, EINVAL if the stored address is invalid, or an error from
 * the EEPROM/VPD access routines.
 */
static int
ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{
	uint32_t word, reg, val;
	int error;
	int found;
	int vpdc;
	int i;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));

	/*
	 * Check for EEPROM.
	 */
	error = ae_check_eeprom_present(sc, &vpdc);
	if (error != 0)
		return (error);

	/*
	 * Read the VPD configuration space.
	 * Each register is prefixed with signature,
	 * so we can check if it is valid.
	 */
	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
		error = ae_vpd_read_word(sc, i, &word);
		if (error != 0)
			break;

		/*
		 * Check signature.
		 */
		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
			break;
		/* The rest of the word names the register this pair holds. */
		reg = word >> AE_VPD_REG_SHIFT;
		i++;	/* Move to the next word. */

		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		/* Data word follows the signature word. */
		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}

	/* Both halves of the address must have been found. */
	if (found < 2)
		return (ENOENT);
	
	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}
 1026 
 1027 static int
 1028 ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
 1029 {
 1030 
 1031         /*
 1032          * BIOS is supposed to set this.
 1033          */
 1034         eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
 1035         eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
 1036         eaddr[1] &= 0xffff;     /* Only last 2 bytes are used. */
 1037 
 1038         if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
 1039                 if (bootverbose)
 1040                         device_printf(sc->dev,
 1041                             "Ethernet address registers are invalid.\n");
 1042                 return (EINVAL);
 1043         }
 1044         return (0);
 1045 }
 1046 
/*
 * Determine the interface's Ethernet address and store it byte-wise in
 * sc->eaddr.  Sources are tried in order: VPD EEPROM, then the MAC
 * address registers; if both fail, a locally-administered random
 * address with an ASUSTek OUI is generated.
 */
static void
ae_retrieve_address(ae_softc_t *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Check for EEPROM.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error != 0)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Generating random ethernet address.\n");
		eaddr[0] = arc4random();

		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->eaddr[0] = 0x02;	/* U/L bit set. */
		sc->eaddr[1] = 0x1f;
		sc->eaddr[2] = 0xc6;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		/* eaddr[1] holds the high 2 bytes, eaddr[0] the low 4. */
		sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}
 1083 
 1084 static void
 1085 ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 1086 {
 1087         bus_addr_t *addr = arg;
 1088 
 1089         if (error != 0)
 1090                 return;
 1091         KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
 1092             nsegs));
 1093         *addr = segs[0].ds_addr;
 1094 }
 1095 
 1096 static int
 1097 ae_alloc_rings(ae_softc_t *sc)
 1098 {
 1099         bus_addr_t busaddr;
 1100         int error;
 1101 
 1102         /*
 1103          * Create parent DMA tag.
 1104          */
 1105         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
 1106             1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 1107             NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
 1108             BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
 1109             &sc->dma_parent_tag);
 1110         if (error != 0) {
 1111                 device_printf(sc->dev, "could not creare parent DMA tag.\n");
 1112                 return (error);
 1113         }
 1114 
 1115         /*
 1116          * Create DMA tag for TxD.
 1117          */
 1118         error = bus_dma_tag_create(sc->dma_parent_tag,
 1119             4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 1120             NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
 1121             AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
 1122             &sc->dma_txd_tag);
 1123         if (error != 0) {
 1124                 device_printf(sc->dev, "could not creare TxD DMA tag.\n");
 1125                 return (error);
 1126         }
 1127 
 1128         /*
 1129          * Create DMA tag for TxS.
 1130          */
 1131         error = bus_dma_tag_create(sc->dma_parent_tag,
 1132             4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 1133             NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
 1134             AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
 1135             &sc->dma_txs_tag);
 1136         if (error != 0) {
 1137                 device_printf(sc->dev, "could not creare TxS DMA tag.\n");
 1138                 return (error);
 1139         }
 1140 
 1141         /*
 1142          * Create DMA tag for RxD.
 1143          */
 1144         error = bus_dma_tag_create(sc->dma_parent_tag,
 1145             128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 1146             NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + 120, 1,
 1147             AE_RXD_COUNT_DEFAULT * 1536 + 120, 0, NULL, NULL,
 1148             &sc->dma_rxd_tag);
 1149         if (error != 0) {
 1150                 device_printf(sc->dev, "could not creare TxS DMA tag.\n");
 1151                 return (error);
 1152         }
 1153 
 1154         /*
 1155          * Allocate TxD DMA memory.
 1156          */
 1157         error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
 1158             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1159             &sc->dma_txd_map);
 1160         if (error != 0) {
 1161                 device_printf(sc->dev,
 1162                     "could not allocate DMA memory for TxD ring.\n");
 1163                 return (error);
 1164         }
 1165         error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
 1166             AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
 1167         if (error != 0 || busaddr == 0) {
 1168                 device_printf(sc->dev,
 1169                     "could not load DMA map for TxD ring.\n");
 1170                 return (error);
 1171         }
 1172         sc->dma_txd_busaddr = busaddr;
 1173 
 1174         /*
 1175          * Allocate TxS DMA memory.
 1176          */
 1177         error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
 1178             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1179             &sc->dma_txs_map);
 1180         if (error != 0) {
 1181                 device_printf(sc->dev,
 1182                     "could not allocate DMA memory for TxS ring.\n");
 1183                 return (error);
 1184         }
 1185         error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
 1186             AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
 1187         if (error != 0 || busaddr == 0) {
 1188                 device_printf(sc->dev,
 1189                     "could not load DMA map for TxS ring.\n");
 1190                 return (error);
 1191         }
 1192         sc->dma_txs_busaddr = busaddr;
 1193 
 1194         /*
 1195          * Allocate RxD DMA memory.
 1196          */
 1197         error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
 1198             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1199             &sc->dma_rxd_map);
 1200         if (error != 0) {
 1201                 device_printf(sc->dev,
 1202                     "could not allocate DMA memory for RxD ring.\n");
 1203                 return (error);
 1204         }
 1205         error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
 1206             sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + 120, ae_dmamap_cb,
 1207             &busaddr, BUS_DMA_NOWAIT);
 1208         if (error != 0 || busaddr == 0) {
 1209                 device_printf(sc->dev,
 1210                     "could not load DMA map for RxD ring.\n");
 1211                 return (error);
 1212         }
 1213         sc->dma_rxd_busaddr = busaddr + 120;
 1214         sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + 120);
 1215 
 1216         return (0);
 1217 }
 1218 
/*
 * Release all DMA resources allocated by ae_alloc_rings().  Safe to
 * call on a partially-initialized softc: each ring is torn down only
 * as far as it was set up (unload map, free memory, destroy tag), and
 * the parent tag is destroyed last.
 */
static void
ae_dma_free(ae_softc_t *sc)
{

	if (sc->dma_txd_tag != NULL) {
		if (sc->dma_txd_map != NULL) {
			bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
			if (sc->txd_base != NULL)
				bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
				    sc->dma_txd_map);

		}
		bus_dma_tag_destroy(sc->dma_txd_tag);
		/* NULL out so a repeated call is a no-op. */
		sc->dma_txd_map = NULL;
		sc->dma_txd_tag = NULL;
		sc->txd_base = NULL;
	}
	if (sc->dma_txs_tag != NULL) {
		if (sc->dma_txs_map != NULL) {
			bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
			if (sc->txs_base != NULL)
				bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
				    sc->dma_txs_map);

		}
		bus_dma_tag_destroy(sc->dma_txs_tag);
		sc->dma_txs_map = NULL;
		sc->dma_txs_tag = NULL;
		sc->txs_base = NULL;
	}
	if (sc->dma_rxd_tag != NULL) {
		if (sc->dma_rxd_map != NULL) {
			bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
			/* rxd_base_dma is the raw allocation (see alloc). */
			if (sc->rxd_base_dma != NULL)
				bus_dmamem_free(sc->dma_rxd_tag,
				    sc->rxd_base_dma, sc->dma_rxd_map);

		}
		bus_dma_tag_destroy(sc->dma_rxd_tag);
		sc->dma_rxd_map = NULL;
		sc->dma_rxd_tag = NULL;
		sc->rxd_base_dma = NULL;
	}
	/* Parent tag goes last, after all child tags are destroyed. */
	if (sc->dma_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->dma_parent_tag);
		sc->dma_parent_tag = NULL;
	}
}
 1267 
 1268 static int
 1269 ae_shutdown(device_t dev)
 1270 {
 1271         ae_softc_t *sc;
 1272         int error;
 1273 
 1274         sc = device_get_softc(dev);
 1275         KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
 1276 
 1277         error = ae_suspend(dev);
 1278         AE_LOCK(sc);
 1279         ae_powersave_enable(sc);
 1280         AE_UNLOCK(sc);
 1281         return (error);
 1282 }
 1283 
 1284 static void
 1285 ae_powersave_disable(ae_softc_t *sc)
 1286 {
 1287         uint32_t val;
 1288         
 1289         AE_LOCK_ASSERT(sc);
 1290 
 1291         AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
 1292         val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
 1293         if (val & AE_PHY_DBG_POWERSAVE) {
 1294                 val &= ~AE_PHY_DBG_POWERSAVE;
 1295                 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
 1296                 DELAY(1000);
 1297         }
 1298 }
 1299 
/*
 * Put the PHY into power-saving mode via its debug registers.  The
 * exact register sequence is undocumented vendor magic; do not reorder
 * these writes.
 */
static void
ae_powersave_enable(ae_softc_t *sc)
{
	uint32_t val;
	
	AE_LOCK_ASSERT(sc);

	/*
	 * XXX magic numbers.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	/*
	 * NOTE(review): the value read from DBG_DATA is written back
	 * to DBG_ADDR here -- looks odd but matches the vendor
	 * sequence; confirm against the datasheet before changing.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
}
 1318 
/*
 * Prepare the chip for a low-power state: program wake-on-LAN (if the
 * hardware supports power management and WOL is enabled), apply PCIe
 * workarounds, and set the PME bits in PCI power-management config
 * space.  Called with the driver lock held, after ae_stop().
 */
static void
ae_pm_init(ae_softc_t *sc)
{
	struct ifnet *ifp;
	uint32_t val;
	uint16_t pmstat;
	struct mii_data *mii;
	int pmc;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((sc->flags & AE_FLAG_PMG) == 0) {
		/* Disable WOL entirely. */
		AE_WRITE_4(sc, AE_WOL_REG, 0);
		return;
	}

	/*
	 * Configure WOL if enabled.
	 */
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		mii = device_get_softc(sc->miibus);
		mii_pollstat(mii);
		if ((mii->mii_media_status & IFM_AVALID) != 0 &&
		    (mii->mii_media_status & IFM_ACTIVE) != 0) {
			/* Link up: wake on magic packet. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \
			    AE_WOL_MAGIC_PME);

			/*
			 * Configure MAC: keep Rx running so magic
			 * packets can be received while suspended.
			 */
			val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \
			    AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \
			    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \
			    AE_HALFBUF_MASK) | \
			    ((AE_MAC_PREAMBLE_DEFAULT << \
			    AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \
			    AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
			if ((IFM_OPTIONS(mii->mii_media_active) & \
			    IFM_FDX) != 0)
				val |= AE_MAC_FULL_DUPLEX;
			AE_WRITE_4(sc, AE_MAC_REG, val);
			    
		} else {	/* No link. */
			/* Wake on link change instead. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \
			    AE_WOL_LNKCHG_PME);
			AE_WRITE_4(sc, AE_MAC_REG, 0);
		}
	} else {
		/* WOL disabled: just power down the PHY. */
		ae_powersave_enable(sc);
	}

	/*
	 * PCIE hacks. Magic numbers.
	 */
	val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
	val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
	AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
	val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
	val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);

	/*
	 * Configure PME: enable it in PCI config space only when WOL
	 * is enabled on the interface.
	 */
	if (pci_find_extcap(sc->dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((ifp->if_capenable & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
}
 1393 
 1394 static int
 1395 ae_suspend(device_t dev)
 1396 {
 1397         ae_softc_t *sc;
 1398 
 1399         sc = device_get_softc(dev);
 1400 
 1401         AE_LOCK(sc);
 1402         ae_stop(sc);
 1403         ae_pm_init(sc);
 1404         AE_UNLOCK(sc);
 1405 
 1406         return (0);
 1407 }
 1408 
 1409 static int
 1410 ae_resume(device_t dev)
 1411 {
 1412         ae_softc_t *sc;
 1413 
 1414         sc = device_get_softc(dev);
 1415         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
 1416 
 1417         AE_LOCK(sc);
 1418         AE_READ_4(sc, AE_WOL_REG);      /* Clear WOL status. */
 1419         if ((sc->ifp->if_flags & IFF_UP) != 0)
 1420                 ae_init_locked(sc);
 1421         AE_UNLOCK(sc);
 1422 
 1423         return (0);
 1424 }
 1425 
 1426 static unsigned int
 1427 ae_tx_avail_size(ae_softc_t *sc)
 1428 {
 1429         unsigned int avail;
 1430         
 1431         if (sc->txd_cur >= sc->txd_ack)
 1432                 avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
 1433         else
 1434                 avail = sc->txd_ack - sc->txd_cur;
 1435 
 1436         return (avail);
 1437 }
 1438 
/*
 * Copy the frame in *m_head into the circular TxD buffer, preceded by
 * an ae_txd_t header, and reserve a TxS status slot for it.  The mbuf
 * itself is NOT consumed: the caller still owns and frees it.  Returns
 * 0 on success or ENOBUFS if the ring lacks room (mbuf left intact so
 * the caller can requeue it).
 */
static int
ae_encap(ae_softc_t *sc, struct mbuf **m_head)
{
	struct mbuf *m0;
	ae_txd_t *hdr;
	unsigned int to_end;
	uint16_t len;

	AE_LOCK_ASSERT(sc);

	m0 = *m_head;
	len = m0->m_pkthdr.len;
	
	/* Need room for the header, the data, and 4-byte-round padding. */
	if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
	    len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
#ifdef AE_DEBUG
		if_printf(sc->ifp, "No free Tx available.\n");
#endif
		return ENOBUFS;
	}

	/* Header is written at the current position... */
	hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
	bzero(hdr, sizeof(*hdr));
	/* Skip header size. */
	sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
	/* Space available to the end of the ring */
	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
	if (to_end >= len) {
		m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
	} else {
		/* Frame wraps: copy in two pieces. */
		m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
		    sc->txd_cur));
		m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
	}

	/*
	 * Set TxD flags and parameters.
	 */
	if ((m0->m_flags & M_VLANTAG) != 0) {
		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
	} else {
		hdr->len = htole16(len);
	}

	/*
	 * Set current TxD position and round up to a 4-byte boundary.
	 */
	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
	/* Producer caught up with consumer: buffer is now full. */
	if (sc->txd_cur == sc->txd_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
	if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
#endif

	/*
	 * Update TxS position and check if there are empty TxS available.
	 */
	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
	if (sc->txs_cur == sc->txs_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;

	/*
	 * Synchronize DMA memory.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
 1512 
 1513 static void
 1514 ae_start(struct ifnet *ifp)
 1515 {
 1516         ae_softc_t *sc;
 1517 
 1518         sc = ifp->if_softc;
 1519         AE_LOCK(sc);
 1520         ae_start_locked(ifp);
 1521         AE_UNLOCK(sc);
 1522 }
 1523 
/*
 * Drain the interface send queue into the TxD ring.  For each frame:
 * copy it into the ring via ae_encap(), tap BPF, and free the mbuf
 * (ae_encap() copies the data, so the mbuf is no longer needed).  If
 * the ring fills, the frame is requeued and IFF_DRV_OACTIVE is set.
 * Finally the hardware producer index is updated and the Tx watchdog
 * armed.  Called with the driver lock held.
 */
static void
ae_start_locked(struct ifnet *ifp)
{
	ae_softc_t *sc;
	unsigned int count;
	struct mbuf *m0;
	int error;

	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK_ASSERT(sc);

#ifdef AE_DEBUG
	if_printf(ifp, "Start called.\n");
#endif

	/* Nothing to do unless running, not stalled, and link is up. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
		return;

	count = 0;
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;	/* Nothing to do. */

		error = ae_encap(sc, &m0);
		if (error != 0) {
			if (m0 != NULL) {
				/* Ring full: put the frame back. */
				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#ifdef AE_DEBUG
				if_printf(ifp, "Setting OACTIVE.\n");
#endif
			}
			break;
		}
		count++;
		sc->tx_inproc++;

		/* Bounce a copy of the frame to BPF. */
		ETHER_BPF_MTAP(ifp, m0);

		/* Data was copied into the ring; mbuf can go. */
		m_freem(m0);
	}

	if (count > 0) {	/* Something was dequeued. */
		/* Hardware index is in 4-byte units. */
		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
		sc->wd_timer = AE_TX_TIMEOUT;	/* Load watchdog. */
#ifdef AE_DEBUG
		if_printf(ifp, "%d packets dequeued.\n", count);
		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
#endif
	}
}
 1579 
/*
 * Deferred link-state handler (queued by ae_miibus_statchg()).
 * Re-evaluates the link, stops both MACs, and -- if the link is up --
 * reconfigures the MAC for the new media and restarts the DMA engines
 * and MACs.
 */
static void
ae_link_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);

	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		AE_UNLOCK(sc);	/* XXX: could happen? */
		return;
	}
	
	/* Re-derive the link flag from the current PHY state. */
	sc->flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch(IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	if ((sc->flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
	AE_UNLOCK(sc);
}
 1637 
 1638 static void
 1639 ae_stop_rxmac(ae_softc_t *sc)
 1640 {
 1641         uint32_t val;
 1642         int i;
 1643 
 1644         AE_LOCK_ASSERT(sc);
 1645 
 1646         /*
 1647          * Stop Rx MAC engine.
 1648          */
 1649         val = AE_READ_4(sc, AE_MAC_REG);
 1650         if ((val & AE_MAC_RX_EN) != 0) {
 1651                 val &= ~AE_MAC_RX_EN;
 1652                 AE_WRITE_4(sc, AE_MAC_REG, val);
 1653         }
 1654 
 1655         /*
 1656          * Stop Rx DMA engine.
 1657          */
 1658         if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
 1659                 AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
 1660 
 1661         /*
 1662          * Wait for IDLE state.
 1663          */
 1664         for (i = 0; i < AE_IDLE_TIMEOUT; i--) {
 1665                 val = AE_READ_4(sc, AE_IDLE_REG);
 1666                 if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
 1667                         break;
 1668                 DELAY(100);
 1669         }
 1670         if (i == AE_IDLE_TIMEOUT)
 1671                 device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
 1672 }
 1673 
 1674 static void
 1675 ae_stop_txmac(ae_softc_t *sc)
 1676 {
 1677         uint32_t val;
 1678         int i;
 1679 
 1680         AE_LOCK_ASSERT(sc);
 1681 
 1682         /*
 1683          * Stop Tx MAC engine.
 1684          */
 1685         val = AE_READ_4(sc, AE_MAC_REG);
 1686         if ((val & AE_MAC_TX_EN) != 0) {
 1687                 val &= ~AE_MAC_TX_EN;
 1688                 AE_WRITE_4(sc, AE_MAC_REG, val);
 1689         }
 1690 
 1691         /*
 1692          * Stop Tx DMA engine.
 1693          */
 1694         if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
 1695                 AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
 1696 
 1697         /*
 1698          * Wait for IDLE state.
 1699          */
 1700         for (i = 0; i < AE_IDLE_TIMEOUT; i--) {
 1701                 val = AE_READ_4(sc, AE_IDLE_REG);
 1702                 if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
 1703                         break;
 1704                 DELAY(100);
 1705         }
 1706         if (i == AE_IDLE_TIMEOUT)
 1707                 device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
 1708 }
 1709 
 1710 static void
 1711 ae_mac_config(ae_softc_t *sc)
 1712 {
 1713         struct mii_data *mii;
 1714         uint32_t val;
 1715 
 1716         AE_LOCK_ASSERT(sc);
 1717 
 1718         mii = device_get_softc(sc->miibus);
 1719         val = AE_READ_4(sc, AE_MAC_REG);
 1720         val &= ~AE_MAC_FULL_DUPLEX;
 1721         /* XXX disable AE_MAC_TX_FLOW_EN? */
 1722 
 1723         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
 1724                 val |= AE_MAC_FULL_DUPLEX;
 1725 
 1726         AE_WRITE_4(sc, AE_MAC_REG, val);
 1727 }
 1728 
 1729 static int
 1730 ae_intr(void *arg)
 1731 {
 1732         ae_softc_t *sc;
 1733         uint32_t val;
 1734 
 1735         sc = (ae_softc_t *)arg;
 1736         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
 1737 
 1738         val = AE_READ_4(sc, AE_ISR_REG);
 1739         if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
 1740                 return (FILTER_STRAY);
 1741 
 1742         /* Disable interrupts. */
 1743         AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
 1744 
 1745         /* Schedule interrupt processing. */
 1746         taskqueue_enqueue(sc->tq, &sc->int_task);
 1747 
 1748         return (FILTER_HANDLED);
 1749 }
 1750 
/*
 * Taskqueue handler scheduled by ae_intr(): services Tx/Rx events and
 * re-initializes the chip on fatal conditions (DMA timeouts, PHY link
 * down), then re-enables interrupts.
 */
static void
ae_int_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;

	AE_LOCK(sc);

	ifp = sc->ifp;

	val = AE_READ_4(sc, AE_ISR_REG);	/* Read interrupt status. */

	/*
	 * Clear interrupts and disable them: writing the status bits back
	 * acknowledges them, and AE_ISR_DISABLE keeps them masked while
	 * we process.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

#ifdef AE_DEBUG
	if_printf(ifp, "Interrupt received: 0x%08x\n", val);
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/*
		 * Fatal events: reset the whole chip. Clearing
		 * IFF_DRV_RUNNING first forces ae_init_locked() to do
		 * a full re-initialization.
		 */
		if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
		    AE_ISR_PHY_LINKDOWN)) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ae_init_locked(sc);
			AE_UNLOCK(sc);
			return;
		}
		if ((val & AE_ISR_TX_EVENT) != 0)
			ae_tx_intr(sc);
		if ((val & AE_ISR_RX_EVENT) != 0)
			ae_rx_intr(sc);
	}

	/*
	 * Re-enable interrupts (writing 0 clears the disable bit).
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0);

	AE_UNLOCK(sc);
}
 1796 
/*
 * Reclaim completed transmit descriptors.
 *
 * The hardware posts a TxS (status) entry for every completed TxD
 * (data) entry; we walk the TxS ring from txs_ack until we hit an
 * entry without AE_TXS_UPDATE set, advancing both ring positions and
 * updating statistics as we go.
 */
static void
ae_tx_intr(ae_softc_t *sc)
{
	struct ifnet *ifp;
	ae_txd_t *txd;
	ae_txs_t *txs;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

#ifdef AE_DEBUG
	if_printf(ifp, "Tx interrupt occuried.\n");
#endif

	/*
	 * Synchronize DMA buffers: POSTREAD/POSTWRITE so the CPU sees
	 * the status words the device just wrote.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		txs = sc->txs_base + sc->txs_ack;
		flags = le16toh(txs->flags);
		/* No UPDATE bit: hardware has not finished this one yet. */
		if ((flags & AE_TXS_UPDATE) == 0)
			break;
		/* Clear UPDATE so this entry is not reclaimed twice. */
		txs->flags = htole16(flags & ~AE_TXS_UPDATE);
		/* Update stats. */
		ae_update_stats_tx(flags, &sc->stats);

		/*
		 * Update TxS position.
		 */
		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
		sc->flags |= AE_FLAG_TXAVAIL;

		/* Cross-check status length against the descriptor. */
		txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
		if (txs->len != txd->len)
			device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
			    le16toh(txs->len), le16toh(txd->len));

		/*
		 * Move txd ack and align on 4-byte boundary: the TxD ring
		 * is a byte buffer, each entry is header + payload padded
		 * to 4 bytes.
		 */
		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
		    sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;

		if ((flags & AE_TXS_SUCCESS) != 0)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;

		sc->tx_inproc--;

		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	/* Defensive: more completions than submissions indicates stray IRQs. */
	if (sc->tx_inproc < 0) {
		if_printf(ifp, "Received stray Tx interrupt(s).\n");
		sc->tx_inproc = 0;
	}

	if (sc->tx_inproc == 0)
		sc->wd_timer = 0;	/* Unarm watchdog. */

	/* Ring space was freed; try to queue more packets. */
	if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ae_start_locked(ifp);
	}

	/*
	 * Synchronize DMA buffers: PREREAD/PREWRITE to hand the rings
	 * back to the device.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
 1878 
 1879 static int
 1880 ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
 1881 {
 1882         struct ifnet *ifp;
 1883         struct mbuf *m;
 1884         unsigned int size;
 1885         uint16_t flags;
 1886 
 1887         AE_LOCK_ASSERT(sc);
 1888 
 1889         ifp = sc->ifp;
 1890         flags = le16toh(rxd->flags);
 1891 
 1892 #ifdef AE_DEBUG
 1893         if_printf(ifp, "Rx interrupt occuried.\n");
 1894 #endif
 1895         size = le16toh(rxd->len) - ETHER_CRC_LEN;
 1896         if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
 1897                 if_printf(ifp, "Runt frame received.");
 1898                 return (EIO);
 1899         }
 1900 
 1901         m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
 1902         if (m == NULL)
 1903                 return (ENOBUFS);
 1904 
 1905         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
 1906             (flags & AE_RXD_HAS_VLAN) != 0) {
 1907                 m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
 1908                 m->m_flags |= M_VLANTAG;
 1909         }
 1910 
 1911         /*
 1912          * Pass it through.
 1913          */
 1914         AE_UNLOCK(sc);
 1915         (*ifp->if_input)(ifp, m);
 1916         AE_LOCK(sc);
 1917 
 1918         return (0);
 1919 }
 1920 
/*
 * Service receive interrupts: walk the Rx descriptor ring from
 * rxd_cur, passing good frames to ae_rxeof() and counting errors,
 * then tell the hardware how far we have consumed.
 */
static void
ae_rx_intr(ae_softc_t *sc)
{
	ae_rxd_t *rxd;
	struct ifnet *ifp;
	uint16_t flags;
	int error;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	/*
	 * Synchronize DMA buffers so the CPU sees descriptors the
	 * device just wrote.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
		flags = le16toh(rxd->flags);
		/* UPDATE not set: the device has not filled this entry. */
		if ((flags & AE_RXD_UPDATE) == 0)
			break;
		/* Clear UPDATE so the entry is not processed twice. */
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

		if ((flags & AE_RXD_SUCCESS) == 0) {
			ifp->if_ierrors++;
			continue;
		}
		/* NOTE: ae_rxeof() drops and re-acquires the driver lock. */
		error = ae_rxeof(sc, rxd);
		if (error != 0) {
			ifp->if_ierrors++;
			continue;
		} else {
			ifp->if_ipackets++;
		}
	}

	/*
	 * Update Rx index: tells the chip which descriptors it may reuse.
	 */
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
}
 1973 
 1974 static void
 1975 ae_watchdog(ae_softc_t *sc)
 1976 {
 1977         struct ifnet *ifp;
 1978 
 1979         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
 1980         AE_LOCK_ASSERT(sc);
 1981         ifp = sc->ifp;
 1982 
 1983         if (sc->wd_timer == 0 || --sc->wd_timer != 0)
 1984                 return;         /* Noting to do. */
 1985 
 1986         if ((sc->flags & AE_FLAG_LINK) == 0)
 1987                 if_printf(ifp, "watchdog timeout (missed link).\n");
 1988         else
 1989                 if_printf(ifp, "watchdog timeout - resetting.\n");
 1990 
 1991         ifp->if_oerrors++;
 1992         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1993         ae_init_locked(sc);
 1994         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1995                 ae_start_locked(ifp);
 1996 }
 1997 
 1998 static void
 1999 ae_tick(void *arg)
 2000 {
 2001         ae_softc_t *sc;
 2002         struct mii_data *mii;
 2003 
 2004         sc = (ae_softc_t *)arg;
 2005         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
 2006         AE_LOCK_ASSERT(sc);
 2007 
 2008         mii = device_get_softc(sc->miibus);
 2009         mii_tick(mii);
 2010         ae_watchdog(sc);        /* Watchdog check. */
 2011         callout_reset(&sc->tick_ch, hz, ae_tick, sc);
 2012 }
 2013 
 2014 static void
 2015 ae_rxvlan(ae_softc_t *sc)
 2016 {
 2017         struct ifnet *ifp;
 2018         uint32_t val;
 2019 
 2020         AE_LOCK_ASSERT(sc);
 2021         ifp = sc->ifp;
 2022         val = AE_READ_4(sc, AE_MAC_REG);
 2023         val &= ~AE_MAC_RMVLAN_EN;
 2024         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 2025                 val |= AE_MAC_RMVLAN_EN;
 2026         AE_WRITE_4(sc, AE_MAC_REG, val);
 2027 }
 2028 
/*
 * Program the hardware receive filter: broadcast/promiscuous/allmulti
 * mode bits in the MAC register plus the two 32-bit multicast hash
 * table registers (MHT0/MHT1).
 */
static void
ae_rxfilter(ae_softc_t *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	rxcfg = AE_READ_4(sc, AE_MAC_REG);
	rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);

	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= AE_MAC_BCAST_EN;
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		rxcfg |= AE_MAC_PROMISC_EN;
	if ((ifp->if_flags & IFF_ALLMULTI) != 0)
		rxcfg |= AE_MAC_MCAST_EN;

	/*
	 * Wipe old settings.
	 */
	AE_WRITE_4(sc, AE_REG_MHT0, 0);
	AE_WRITE_4(sc, AE_REG_MHT1, 0);
	/* Promiscuous/allmulti: accept everything, no hash needed. */
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
		AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
		AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
		return;
	}

	/*
	 * Load multicast tables: CRC bit 31 selects the MHT register,
	 * bits 30-26 select the bit within it.
	 */
	bzero(mchash, sizeof(mchash));
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}
	if_maddr_runlock(ifp);
	AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
	AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
	AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
}
 2083 
/*
 * Interface ioctl handler. Takes the driver lock around any state it
 * touches; everything unrecognized is delegated to ether_ioctl().
 * Returns 0 or an errno value.
 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ae_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		/* No jumbo support: MTU limited to the standard range. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			AE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			/* Re-init so the hardware picks up the new MTU. */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ae_init_locked(sc);
			}
			AE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		AE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/*
				 * Already running: only reprogram the Rx
				 * filter if PROMISC/ALLMULTI changed.
				 */
				if (((ifp->if_flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ae_rxfilter(sc);
			} else {
				/* Bring up, unless a detach is in progress. */
				if ((sc->flags & AE_FLAG_DETACH) == 0)
					ae_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ae_stop(sc);
		}
		/* Remember flags to detect changes on the next call. */
		sc->if_flags = ifp->if_flags;
		AE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		AE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ae_rxfilter(sc);
		AE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media selection is handled by the MII layer. */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		AE_LOCK(sc);
		/* mask holds the capabilities being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			ae_rxvlan(sc);
		}
		VLAN_CAPABILITIES(ifp);
		AE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
 2157 
/*
 * Stop the interface: mark it down, cancel the watchdog and callout,
 * mask interrupts, halt the MACs and DMA engines, and wait for the
 * chip to go fully idle.
 */
static void
ae_stop(ae_softc_t *sc)
{
	struct ifnet *ifp;
	int i;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->flags &= ~AE_FLAG_LINK;
	sc->wd_timer = 0;	/* Cancel watchdog. */
	callout_stop(&sc->tick_ch);

	/*
	 * Clear and disable interrupts (mask all, then ack everything
	 * pending).
	 */
	AE_WRITE_4(sc, AE_IMR_REG, 0);
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_txmac(sc);
	ae_stop_rxmac(sc);

	/*
	 * Stop DMA engines.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

	/*
	 * Wait for everything to enter idle state: poll the idle
	 * register every 100us, up to AE_IDLE_TIMEOUT iterations.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "could not enter idle state in stop.\n");
}
 2201 
 2202 static void
 2203 ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
 2204 {
 2205 
 2206         if ((flags & AE_TXS_BCAST) != 0)
 2207                 stats->tx_bcast++;
 2208         if ((flags & AE_TXS_MCAST) != 0)
 2209                 stats->tx_mcast++;
 2210         if ((flags & AE_TXS_PAUSE) != 0)
 2211                 stats->tx_pause++;
 2212         if ((flags & AE_TXS_CTRL) != 0)
 2213                 stats->tx_ctrl++;
 2214         if ((flags & AE_TXS_DEFER) != 0)
 2215                 stats->tx_defer++;
 2216         if ((flags & AE_TXS_EXCDEFER) != 0)
 2217                 stats->tx_excdefer++;
 2218         if ((flags & AE_TXS_SINGLECOL) != 0)
 2219                 stats->tx_singlecol++;
 2220         if ((flags & AE_TXS_MULTICOL) != 0)
 2221                 stats->tx_multicol++;
 2222         if ((flags & AE_TXS_LATECOL) != 0)
 2223                 stats->tx_latecol++;
 2224         if ((flags & AE_TXS_ABORTCOL) != 0)
 2225                 stats->tx_abortcol++;
 2226         if ((flags & AE_TXS_UNDERRUN) != 0)
 2227                 stats->tx_underrun++;
 2228 }
 2229 
 2230 static void
 2231 ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
 2232 {
 2233 
 2234         if ((flags & AE_RXD_BCAST) != 0)
 2235                 stats->rx_bcast++;
 2236         if ((flags & AE_RXD_MCAST) != 0)
 2237                 stats->rx_mcast++;
 2238         if ((flags & AE_RXD_PAUSE) != 0)
 2239                 stats->rx_pause++;
 2240         if ((flags & AE_RXD_CTRL) != 0)
 2241                 stats->rx_ctrl++;
 2242         if ((flags & AE_RXD_CRCERR) != 0)
 2243                 stats->rx_crcerr++;
 2244         if ((flags & AE_RXD_CODEERR) != 0)
 2245                 stats->rx_codeerr++;
 2246         if ((flags & AE_RXD_RUNT) != 0)
 2247                 stats->rx_runt++;
 2248         if ((flags & AE_RXD_FRAG) != 0)
 2249                 stats->rx_frag++;
 2250         if ((flags & AE_RXD_TRUNC) != 0)
 2251                 stats->rx_trunc++;
 2252         if ((flags & AE_RXD_ALIGN) != 0)
 2253                 stats->rx_align++;
 2254 }

Cache object: d38a803dc85befcaf325987d01778e00


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.