The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/ae/if_ae.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   17  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   19  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   20  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   21  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   23  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   24  *
   25  * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
   26  *
   27  * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD$");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/bus.h>
   36 #include <sys/endian.h>
   37 #include <sys/kernel.h>
   38 #include <sys/malloc.h>
   39 #include <sys/mbuf.h>
   40 #include <sys/rman.h>
   41 #include <sys/module.h>
   42 #include <sys/queue.h>
   43 #include <sys/socket.h>
   44 #include <sys/sockio.h>
   45 #include <sys/sysctl.h>
   46 #include <sys/taskqueue.h>
   47 
   48 #include <net/bpf.h>
   49 #include <net/if.h>
   50 #include <net/if_arp.h>
   51 #include <net/ethernet.h>
   52 #include <net/if_dl.h>
   53 #include <net/if_media.h>
   54 #include <net/if_types.h>
   55 #include <net/if_vlan_var.h>
   56 
   57 #include <netinet/in.h>
   58 #include <netinet/in_systm.h>
   59 #include <netinet/ip.h>
   60 #include <netinet/tcp.h>
   61 
   62 #include <dev/mii/mii.h>
   63 #include <dev/mii/miivar.h>
   64 #include <dev/pci/pcireg.h>
   65 #include <dev/pci/pcivar.h>
   66 
   67 #include <machine/bus.h>
   68 
   69 #include "miibus_if.h"
   70 
   71 #include "if_aereg.h"
   72 #include "if_aevar.h"
   73 
   74 /*
   75  * Devices supported by this driver.
   76  */
static struct ae_dev {
        uint16_t        vendorid;       /* PCI vendor ID to match. */
        uint16_t        deviceid;       /* PCI device ID to match. */
        const char      *name;          /* Description set on probe. */
} ae_devs[] = {
        { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
                "Attansic Technology Corp, L2 FastEthernet" },
};
/* Number of entries in the ae_devs[] match table. */
#define AE_DEVS_COUNT (sizeof(ae_devs) / sizeof(*ae_devs))
   86 
/* Memory-mapped register window: BAR(0). */
static struct resource_spec ae_res_spec_mem[] = {
        { SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
        { -1,                   0,              0 }
};
/* Legacy (INTx) interrupt: rid 0, shareable. */
static struct resource_spec ae_res_spec_irq[] = {
        { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
        { -1,                   0,              0 }
};
/* MSI interrupt: rid 1 (MSI rids start at 1), exclusive. */
static struct resource_spec ae_res_spec_msi[] = {
        { SYS_RES_IRQ,          1,              RF_ACTIVE },
        { -1,                   0,              0 }
};
   99 
  100 static int      ae_probe(device_t dev);
  101 static int      ae_attach(device_t dev);
  102 static void     ae_pcie_init(ae_softc_t *sc);
  103 static void     ae_phy_reset(ae_softc_t *sc);
  104 static void     ae_phy_init(ae_softc_t *sc);
  105 static int      ae_reset(ae_softc_t *sc);
  106 static void     ae_init(void *arg);
  107 static int      ae_init_locked(ae_softc_t *sc);
  108 static int      ae_detach(device_t dev);
  109 static int      ae_miibus_readreg(device_t dev, int phy, int reg);
  110 static int      ae_miibus_writereg(device_t dev, int phy, int reg, int val);
  111 static void     ae_miibus_statchg(device_t dev);
  112 static void     ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
  113 static int      ae_mediachange(struct ifnet *ifp);
  114 static void     ae_retrieve_address(ae_softc_t *sc);
  115 static void     ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
  116     int error);
  117 static int      ae_alloc_rings(ae_softc_t *sc);
  118 static void     ae_dma_free(ae_softc_t *sc);
  119 static int      ae_shutdown(device_t dev);
  120 static int      ae_suspend(device_t dev);
  121 static void     ae_powersave_disable(ae_softc_t *sc);
  122 static void     ae_powersave_enable(ae_softc_t *sc);
  123 static int      ae_resume(device_t dev);
  124 static unsigned int     ae_tx_avail_size(ae_softc_t *sc);
  125 static int      ae_encap(ae_softc_t *sc, struct mbuf **m_head);
  126 static void     ae_start(struct ifnet *ifp);
  127 static void     ae_start_locked(struct ifnet *ifp);
  128 static void     ae_link_task(void *arg, int pending);
  129 static void     ae_stop_rxmac(ae_softc_t *sc);
  130 static void     ae_stop_txmac(ae_softc_t *sc);
  131 static void     ae_mac_config(ae_softc_t *sc);
  132 static int      ae_intr(void *arg);
  133 static void     ae_int_task(void *arg, int pending);
  134 static void     ae_tx_intr(ae_softc_t *sc);
  135 static void     ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
  136 static void     ae_rx_intr(ae_softc_t *sc);
  137 static void     ae_watchdog(ae_softc_t *sc);
  138 static void     ae_tick(void *arg);
  139 static void     ae_rxfilter(ae_softc_t *sc);
  140 static void     ae_rxvlan(ae_softc_t *sc);
  141 static int      ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
  142 static void     ae_stop(ae_softc_t *sc);
  143 static int      ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
  144 static int      ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
  145 static int      ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
  146 static int      ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
  147 static void     ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
  148 static void     ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
  149 static void     ae_init_tunables(ae_softc_t *sc);
  150 
/* newbus method table: device lifecycle plus MII register accessors. */
static device_method_t ae_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_probe,         ae_probe),
        DEVMETHOD(device_attach,        ae_attach),
        DEVMETHOD(device_detach,        ae_detach),
        DEVMETHOD(device_shutdown,      ae_shutdown),
        DEVMETHOD(device_suspend,       ae_suspend),
        DEVMETHOD(device_resume,        ae_resume),

        /* MII interface. */
        DEVMETHOD(miibus_readreg,       ae_miibus_readreg),
        DEVMETHOD(miibus_writereg,      ae_miibus_writereg),
        DEVMETHOD(miibus_statchg,       ae_miibus_statchg),

        { NULL, NULL }
};
static driver_t ae_driver = {
        "ae",
        ae_methods,
        sizeof(ae_softc_t)
};
static devclass_t ae_devclass;

/* Register "ae" on the PCI bus and hang a miibus instance off it. */
DRIVER_MODULE(ae, pci, ae_driver, ae_devclass, 0, 0);
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ae, pci, 1, 1, 1);
MODULE_DEPEND(ae, ether, 1, 1, 1);
MODULE_DEPEND(ae, miibus, 1, 1, 1);
  179 
  180 /*
  181  * Tunables.
  182  */
/* Set hw.ae.msi_disable=1 in loader.conf to force legacy INTx interrupts. */
static int msi_disable = 0;
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);
  185 
  186 #define AE_READ_4(sc, reg) \
  187         bus_read_4((sc)->mem[0], (reg))
  188 #define AE_READ_2(sc, reg) \
  189         bus_read_2((sc)->mem[0], (reg))
  190 #define AE_READ_1(sc, reg) \
  191         bus_read_1((sc)->mem[0], (reg))
  192 #define AE_WRITE_4(sc, reg, val) \
  193         bus_write_4((sc)->mem[0], (reg), (val))
  194 #define AE_WRITE_2(sc, reg, val) \
  195         bus_write_2((sc)->mem[0], (reg), (val))
  196 #define AE_WRITE_1(sc, reg, val) \
  197         bus_write_1((sc)->mem[0], (reg), (val))
  198 #define AE_PHY_READ(sc, reg) \
  199         ae_miibus_readreg(sc->dev, 0, reg)
  200 #define AE_PHY_WRITE(sc, reg, val) \
  201         ae_miibus_writereg(sc->dev, 0, reg, val)
  202 #define AE_CHECK_EADDR_VALID(eaddr) \
  203         ((eaddr[0] == 0 && eaddr[1] == 0) || \
  204         (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
  205 #define AE_RXD_VLAN(vtag) \
  206         (((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
  207 #define AE_TXD_VLAN(vtag) \
  208         (((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
  209 
  210 static int
  211 ae_probe(device_t dev)
  212 {
  213         uint16_t deviceid, vendorid;
  214         int i;
  215 
  216         vendorid = pci_get_vendor(dev);
  217         deviceid = pci_get_device(dev);
  218 
  219         /*
  220          * Search through the list of supported devs for matching one.
  221          */
  222         for (i = 0; i < AE_DEVS_COUNT; i++) {
  223                 if (vendorid == ae_devs[i].vendorid &&
  224                     deviceid == ae_devs[i].deviceid) {
  225                         device_set_desc(dev, ae_devs[i].name);
  226                         return (BUS_PROBE_DEFAULT);
  227                 }
  228         }
  229         return (ENXIO);
  230 }
  231 
/*
 * Attach method: map registers, allocate the interrupt (MSI preferred,
 * legacy INTx fallback), reset the chip, read the MAC address, allocate
 * DMA rings, create the ifnet, attach the MII bus and hook up the
 * interrupt handler.  Any failure jumps to "fail", where ae_detach()
 * unwinds whatever was set up so far.
 */
static int
ae_attach(device_t dev)
{
        ae_softc_t *sc;
        struct ifnet *ifp;
        uint8_t chiprev;
        uint32_t pcirev;
        int nmsi, pmc;
        int error;

        sc = device_get_softc(dev); /* Automatically allocated and zeroed
                                       on attach. */
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
        sc->dev = dev;

        /*
         * Initialize mutexes and tasks.
         */
        mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
        callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
        TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
        TASK_INIT(&sc->link_task, 0, ae_link_task, sc);

        pci_enable_busmaster(dev);              /* Enable bus mastering. */

        sc->spec_mem = ae_res_spec_mem;

        /*
         * Allocate memory-mapped registers.
         */
        error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
        if (error != 0) {
                device_printf(dev, "could not allocate memory resources.\n");
                sc->spec_mem = NULL;
                goto fail;
        }

        /*
         * Retrieve PCI and chip revisions.
         */
        pcirev = pci_get_revid(dev);
        chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
            AE_MASTER_REVNUM_MASK;
        if (bootverbose) {
                device_printf(dev, "pci device revision: %#04x\n", pcirev);
                device_printf(dev, "chip id: %#02x\n", chiprev);
        }
        nmsi = pci_msi_count(dev);
        if (bootverbose)
                device_printf(dev, "MSI count: %d.\n", nmsi);

        /*
         * Allocate interrupt resources.  Try a single MSI vector first
         * (unless hw.ae.msi_disable is set); on any failure the MSI is
         * released and we fall back to the legacy INTx path below.
         */
        if (msi_disable == 0 && nmsi == 1) {
                error = pci_alloc_msi(dev, &nmsi);
                if (error == 0) {
                        device_printf(dev, "Using MSI messages.\n");
                        sc->spec_irq = ae_res_spec_msi;
                        error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
                        if (error != 0) {
                                device_printf(dev, "MSI allocation failed.\n");
                                sc->spec_irq = NULL;
                                pci_release_msi(dev);
                        } else {
                                sc->flags |= AE_FLAG_MSI;
                        }
                }
        }
        /* Legacy INTx fallback; relies on spec_irq being NULL in the
         * zeroed softc when the MSI path was skipped or failed. */
        if (sc->spec_irq == NULL) {
                sc->spec_irq = ae_res_spec_irq;
                error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
                if (error != 0) {
                        device_printf(dev, "could not allocate IRQ resources.\n");
                        sc->spec_irq = NULL;
                        goto fail;
                }
        }
        
        ae_init_tunables(sc);

        ae_phy_reset(sc);               /* Reset PHY. */
        error = ae_reset(sc);           /* Reset the controller itself. */
        if (error != 0)
                goto fail;

        ae_pcie_init(sc);

        ae_retrieve_address(sc);        /* Load MAC address. */

        error = ae_alloc_rings(sc);     /* Allocate ring buffers. */
        if (error != 0)
                goto fail;

        ifp = sc->ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "could not allocate ifnet structure.\n");
                error = ENXIO;
                goto fail;
        }

        /* Populate ifnet callbacks and capabilities. */
        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = ae_ioctl;
        ifp->if_start = ae_start;
        ifp->if_init = ae_init;
        ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
        ifp->if_hwassist = 0;
        ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
        IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
        IFQ_SET_READY(&ifp->if_snd);
        /* Advertise magic-packet WOL only if PCI power management exists. */
        if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
                ifp->if_capabilities |= IFCAP_WOL_MAGIC;
                sc->flags |= AE_FLAG_PMG;
        }
        ifp->if_capenable = ifp->if_capabilities;

        /*
         * Configure and attach MII bus.
         */
        error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
            ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
            MII_OFFSET_ANY, 0);
        if (error != 0) {
                device_printf(dev, "attaching PHYs failed\n");
                goto fail;
        }

        ether_ifattach(ifp, sc->eaddr);
        /* Tell the upper layer(s) we support long frames. */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

        /*
         * Create and run all helper tasks.
         * NOTE(review): with M_WAITOK, taskqueue_create_fast() should not
         * return NULL; the check is kept as a belt-and-suspenders guard.
         */
        sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->tq);
        if (sc->tq == NULL) {
                device_printf(dev, "could not create taskqueue.\n");
                ether_ifdetach(ifp);
                error = ENXIO;
                goto fail;
        }
        taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(sc->dev));

        /*
         * Configure interrupt handlers.
         */
        error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
            ae_intr, NULL, sc, &sc->intrhand);
        if (error != 0) {
                device_printf(dev, "could not set up interrupt handler.\n");
                taskqueue_free(sc->tq);
                sc->tq = NULL;
                ether_ifdetach(ifp);
                goto fail;
        }

        /* Success also falls through here; error is 0 in that case. */
fail:
        if (error != 0)
                ae_detach(dev);
        
        return (error);
}
  398 
  399 #define AE_SYSCTL(stx, parent, name, desc, ptr) \
  400         SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, name, CTLFLAG_RD, ptr, 0, desc)
  401 
  402 static void
  403 ae_init_tunables(ae_softc_t *sc)
  404 {
  405         struct sysctl_ctx_list *ctx;
  406         struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
  407         struct ae_stats *ae_stats;
  408 
  409         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
  410         ae_stats = &sc->stats;
  411 
  412         ctx = device_get_sysctl_ctx(sc->dev);
  413         root = device_get_sysctl_tree(sc->dev);
  414         stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
  415             CTLFLAG_RD, NULL, "ae statistics");
  416 
  417         /*
  418          * Receiver statistcics.
  419          */
  420         stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
  421             CTLFLAG_RD, NULL, "Rx MAC statistics");
  422         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "bcast",
  423             "broadcast frames", &ae_stats->rx_bcast);
  424         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "mcast",
  425             "multicast frames", &ae_stats->rx_mcast);
  426         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "pause",
  427             "PAUSE frames", &ae_stats->rx_pause);
  428         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "control",
  429             "control frames", &ae_stats->rx_ctrl);
  430         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "crc_errors",
  431             "frames with CRC errors", &ae_stats->rx_crcerr);
  432         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "code_errors",
  433             "frames with invalid opcode", &ae_stats->rx_codeerr);
  434         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "runt",
  435             "runt frames", &ae_stats->rx_runt);
  436         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "frag",
  437             "fragmented frames", &ae_stats->rx_frag);
  438         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "align_errors",
  439             "frames with alignment errors", &ae_stats->rx_align);
  440         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "truncated",
  441             "frames truncated due to Rx FIFO inderrun", &ae_stats->rx_trunc);
  442 
  443         /*
  444          * Receiver statistcics.
  445          */
  446         stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
  447             CTLFLAG_RD, NULL, "Tx MAC statistics");
  448         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "bcast",
  449             "broadcast frames", &ae_stats->tx_bcast);
  450         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "mcast",
  451             "multicast frames", &ae_stats->tx_mcast);
  452         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "pause",
  453             "PAUSE frames", &ae_stats->tx_pause);
  454         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "control",
  455             "control frames", &ae_stats->tx_ctrl);
  456         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "defers",
  457             "deferrals occuried", &ae_stats->tx_defer);
  458         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "exc_defers",
  459             "excessive deferrals occuried", &ae_stats->tx_excdefer);
  460         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "singlecols",
  461             "single collisions occuried", &ae_stats->tx_singlecol);
  462         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "multicols",
  463             "multiple collisions occuried", &ae_stats->tx_multicol);
  464         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "latecols",
  465             "late collisions occuried", &ae_stats->tx_latecol);
  466         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "aborts",
  467             "transmit aborts due collisions", &ae_stats->tx_abortcol);
  468         AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "underruns",
  469             "Tx FIFO underruns", &ae_stats->tx_underrun);
  470 }
  471 
/*
 * Program the PCIe LTSSM test-mode and DLL Tx control registers with
 * their default values (vendor-recommended magic; see if_aereg.h).
 */
static void
ae_pcie_init(ae_softc_t *sc)
{

        AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
        AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
}
  479 
/*
 * Enable (power up) the PHY and busy-wait 1ms for it to settle.
 */
static void
ae_phy_reset(ae_softc_t *sc)
{

        AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
        DELAY(1000);    /* XXX: pause(9) ? */
}
  487 
/*
 * Soft-reset the controller and wait for it to report both reset
 * completion and an all-idle state.  Returns 0 on success or ENXIO if
 * either wait times out.
 */
static int
ae_reset(ae_softc_t *sc)
{
        int i;

        /*
         * Issue a soft reset.
         */
        AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
        /* Make sure the reset write reaches the device before polling. */
        bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
        
        /*
         * Wait for reset to complete (chip clears the SOFT_RESET bit).
         */
        for (i = 0; i < AE_RESET_TIMEOUT; i++) {
                if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
                        break;
                DELAY(10);
        }
        if (i == AE_RESET_TIMEOUT) {
                device_printf(sc->dev, "reset timeout.\n");
                return (ENXIO);
        }

        /*
         * Wait for everything to enter idle state (idle register reads 0).
         */
        for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
                if (AE_READ_4(sc, AE_IDLE_REG) == 0)
                        break;
                DELAY(100);
        }
        if (i == AE_IDLE_TIMEOUT) {
                device_printf(sc->dev, "could not enter idle state.\n");
                return (ENXIO);
        }
        return (0);
}
  527 
  528 static void
  529 ae_init(void *arg)
  530 {
  531         ae_softc_t *sc;
  532 
  533         sc = (ae_softc_t *)arg;
  534         AE_LOCK(sc);
  535         ae_init_locked(sc);
  536         AE_UNLOCK(sc);
  537 }
  538 
/*
 * Per-init PHY setup.  Currently a no-op: the link status change
 * interrupt enable is compiled out under "notyet".
 */
static void
ae_phy_init(ae_softc_t *sc)
{

        /*
         * Enable link status change interrupt.
         * XXX magic numbers.
         */
#ifdef notyet
        AE_PHY_WRITE(sc, 18, 0xc00);
#endif
}
  551 
/*
 * Program the chip from scratch and start Tx/Rx.  Must be called with
 * the softc lock held.  A no-op (returns 0) if the interface is already
 * running.  Returns ENXIO if the chip flags PHY link-down immediately
 * after programming, 0 otherwise.
 */
static int
ae_init_locked(ae_softc_t *sc)
{
        struct ifnet *ifp;
        struct mii_data *mii;
        uint8_t eaddr[ETHER_ADDR_LEN];
        uint32_t val;
        bus_addr_t addr;

        AE_LOCK_ASSERT(sc);

        ifp = sc->ifp;
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                return (0);
        mii = device_get_softc(sc->miibus);

        /* Quiesce and reset the hardware before reprogramming it. */
        ae_stop(sc);
        ae_reset(sc);
        ae_pcie_init(sc);               /* Initialize PCIE stuff. */
        ae_phy_init(sc);
        ae_powersave_disable(sc);

        /*
         * Clear and disable interrupts.
         */
        AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

        /*
         * Set the MAC address (split across two registers: low 4 bytes
         * in EADDR0, high 2 bytes in EADDR1).
         */
        bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
        val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
        AE_WRITE_4(sc, AE_EADDR0_REG, val);
        val = eaddr[0] << 8 | eaddr[1];
        AE_WRITE_4(sc, AE_EADDR1_REG, val);

        /* Clear all descriptor memory before handing it to the chip. */
        bzero(sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING);
        bzero(sc->txd_base, AE_TXD_BUFSIZE_DEFAULT);
        bzero(sc->txs_base, AE_TXS_COUNT_DEFAULT * 4);
        /*
         * Set ring buffers base addresses.  A single high-address
         * register is shared; presumably all rings live in the same 4GB
         * region -- see ae_alloc_rings() for the allocation constraints.
         */
        addr = sc->dma_rxd_busaddr;
        AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
        AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
        addr = sc->dma_txd_busaddr;
        AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
        addr = sc->dma_txs_busaddr;
        AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

        /*
         * Configure ring buffers sizes (Tx buffer size is in 4-byte units).
         */
        AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
        AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
        AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

        /*
         * Configure interframe gap parameters.
         */
        val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
            AE_IFG_TXIPG_MASK) |
            ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
            AE_IFG_RXIPG_MASK) |
            ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
            AE_IFG_IPGR1_MASK) |
            ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
            AE_IFG_IPGR2_MASK);
        AE_WRITE_4(sc, AE_IFG_REG, val);

        /*
         * Configure half-duplex operation.
         */
        val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
            AE_HDPX_LCOL_MASK) |
            ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
            AE_HDPX_RETRY_MASK) |
            ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
            AE_HDPX_ABEBT_MASK) |
            ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
            AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
        AE_WRITE_4(sc, AE_HDPX_REG, val);

        /*
         * Configure interrupt moderate timer.
         */
        AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
        val = AE_READ_4(sc, AE_MASTER_REG);
        val |= AE_MASTER_IMT_EN;
        AE_WRITE_4(sc, AE_MASTER_REG, val);

        /*
         * Configure interrupt clearing timer.
         */
        AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

        /*
         * Configure MTU (maximum frame size including headers and CRC).
         */
        val = ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
            ETHER_CRC_LEN;
        AE_WRITE_2(sc, AE_MTU_REG, val);

        /*
         * Configure cut-through threshold.
         */
        AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

        /*
         * Configure flow control: assert PAUSE at 7/8 ring occupancy,
         * deassert at max(min-count/8, default-count/12).
         */
        AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
        AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
            (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
            (AE_RXD_COUNT_DEFAULT / 12));

        /*
         * Init mailboxes (software ring indices and their hardware copies).
         */
        sc->txd_cur = sc->rxd_cur = 0;
        sc->txs_ack = sc->txd_ack = 0;
        sc->rxd_cur = 0;
        AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
        AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);

        sc->tx_inproc = 0;      /* Number of packets the chip processes now. */
        sc->flags |= AE_FLAG_TXAVAIL;   /* Free Tx's available. */

        /*
         * Enable DMA.
         */
        AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
        AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

        /*
         * Check if everything is OK.
         */
        val = AE_READ_4(sc, AE_ISR_REG);
        if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
                device_printf(sc->dev, "Initialization failed.\n");
                return (ENXIO);
        }

        /*
         * Clear interrupt status.
         */
        AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
        AE_WRITE_4(sc, AE_ISR_REG, 0x0);

        /*
         * Enable interrupts.
         */
        val = AE_READ_4(sc, AE_MASTER_REG);
        AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
        AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

        /*
         * Disable WOL.
         */
        AE_WRITE_4(sc, AE_WOL_REG, 0);

        /*
         * Configure MAC.
         */
        val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
            AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
            AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
            ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
            ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
            AE_MAC_PREAMBLE_MASK);
        AE_WRITE_4(sc, AE_MAC_REG, val);

        /*
         * Configure Rx MAC (multicast filter and VLAN tag stripping).
         */
        ae_rxfilter(sc);
        ae_rxvlan(sc);

        /*
         * Enable Tx/Rx.
         */
        val = AE_READ_4(sc, AE_MAC_REG);
        AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

        sc->flags &= ~AE_FLAG_LINK;
        mii_mediachg(mii);      /* Switch to the current media. */

        callout_reset(&sc->tick_ch, hz, ae_tick, sc);

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

#ifdef AE_DEBUG
        device_printf(sc->dev, "Initialization complete.\n");
#endif

        return (0);
}
  750 
/*
 * Detach method; also used as the error-unwind path from ae_attach(),
 * so every step must tolerate its resource never having been created.
 * Teardown order: stop the chip, drain deferred work, detach the ifnet,
 * free the taskqueue/miibus/DMA/interrupt/bus resources, release MSI,
 * and finally destroy the mutex.
 */
static int
ae_detach(device_t dev)
{
        struct ae_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);
        KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
        ifp = sc->ifp;
        if (device_is_attached(dev)) {
                AE_LOCK(sc);
                /* Prevent further task/ioctl activity while we tear down. */
                sc->flags |= AE_FLAG_DETACH;
                ae_stop(sc);
                AE_UNLOCK(sc);
                callout_drain(&sc->tick_ch);
                taskqueue_drain(sc->tq, &sc->int_task);
                taskqueue_drain(taskqueue_swi, &sc->link_task);
                ether_ifdetach(ifp);
        }
        if (sc->tq != NULL) {
                /* NOTE(review): int_task may already have been drained
                 * above; the second drain is harmless. */
                taskqueue_drain(sc->tq, &sc->int_task);
                taskqueue_free(sc->tq);
                sc->tq = NULL;
        }
        if (sc->miibus != NULL) {
                device_delete_child(dev, sc->miibus);
                sc->miibus = NULL;
        }
        bus_generic_detach(sc->dev);
        ae_dma_free(sc);
        if (sc->intrhand != NULL) {
                bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
                sc->intrhand = NULL;
        }
        if (ifp != NULL) {
                if_free(ifp);
                sc->ifp = NULL;
        }
        if (sc->spec_irq != NULL)
                bus_release_resources(dev, sc->spec_irq, sc->irq);
        if (sc->spec_mem != NULL)
                bus_release_resources(dev, sc->spec_mem, sc->mem);
        if ((sc->flags & AE_FLAG_MSI) != 0)
                pci_release_msi(dev);
        mtx_destroy(&sc->mtx);

        return (0);
}
  799 
  800 static int
  801 ae_miibus_readreg(device_t dev, int phy, int reg)
  802 {
  803         ae_softc_t *sc;
  804         uint32_t val;
  805         int i;
  806 
  807         sc = device_get_softc(dev);
  808         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
  809 
  810         /*
  811          * Locking is done in upper layers.
  812          */
  813 
  814         val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
  815             AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
  816             ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
  817         AE_WRITE_4(sc, AE_MDIO_REG, val);
  818 
  819         /*
  820          * Wait for operation to complete.
  821          */
  822         for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
  823                 DELAY(2);
  824                 val = AE_READ_4(sc, AE_MDIO_REG);
  825                 if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
  826                         break;
  827         }
  828         if (i == AE_MDIO_TIMEOUT) {
  829                 device_printf(sc->dev, "phy read timeout: %d.\n", reg);
  830                 return (0);
  831         }
  832         return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
  833 }
  834 
  835 static int
  836 ae_miibus_writereg(device_t dev, int phy, int reg, int val)
  837 {
  838         ae_softc_t *sc;
  839         uint32_t aereg;
  840         int i;
  841 
  842         sc = device_get_softc(dev);
  843         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
  844 
  845         /*
  846          * Locking is done in upper layers.
  847          */
  848 
  849         aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
  850             AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
  851             ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
  852             ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
  853         AE_WRITE_4(sc, AE_MDIO_REG, aereg);
  854 
  855         /*
  856          * Wait for operation to complete.
  857          */
  858         for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
  859                 DELAY(2);
  860                 aereg = AE_READ_4(sc, AE_MDIO_REG);
  861                 if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
  862                         break;
  863         }
  864         if (i == AE_MDIO_TIMEOUT) {
  865                 device_printf(sc->dev, "phy write timeout: %d.\n", reg);
  866         }
  867         return (0);
  868 }
  869 
  870 static void
  871 ae_miibus_statchg(device_t dev)
  872 {
  873         ae_softc_t *sc;
  874 
  875         sc = device_get_softc(dev);
  876         taskqueue_enqueue(taskqueue_swi, &sc->link_task);
  877 }
  878 
  879 static void
  880 ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
  881 {
  882         ae_softc_t *sc;
  883         struct mii_data *mii;
  884 
  885         sc = ifp->if_softc;
  886         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
  887 
  888         AE_LOCK(sc);
  889         mii = device_get_softc(sc->miibus);
  890         mii_pollstat(mii);
  891         ifmr->ifm_status = mii->mii_media_status;
  892         ifmr->ifm_active = mii->mii_media_active;
  893         AE_UNLOCK(sc);
  894 }
  895 
  896 static int
  897 ae_mediachange(struct ifnet *ifp)
  898 {
  899         ae_softc_t *sc;
  900         struct mii_data *mii;
  901         struct mii_softc *mii_sc;
  902         int error;
  903 
  904         /* XXX: check IFF_UP ?? */
  905         sc = ifp->if_softc;
  906         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
  907         AE_LOCK(sc);
  908         mii = device_get_softc(sc->miibus);
  909         LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
  910                 PHY_RESET(mii_sc);
  911         error = mii_mediachg(mii);
  912         AE_UNLOCK(sc);
  913 
  914         return (error);
  915 }
  916 
  917 static int
  918 ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
  919 {
  920         int error;
  921         uint32_t val;
  922 
  923         KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));
  924 
  925         /*
  926          * Not sure why, but Linux does this.
  927          */
  928         val = AE_READ_4(sc, AE_SPICTL_REG);
  929         if ((val & AE_SPICTL_VPD_EN) != 0) {
  930                 val &= ~AE_SPICTL_VPD_EN;
  931                 AE_WRITE_4(sc, AE_SPICTL_REG, val);
  932         }
  933         error = pci_find_cap(sc->dev, PCIY_VPD, vpdc);
  934         return (error);
  935 }
  936 
  937 static int
  938 ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
  939 {
  940         uint32_t val;
  941         int i;
  942 
  943         AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);     /* Clear register value. */
  944 
  945         /*
  946          * VPD registers start at offset 0x100. Read them.
  947          */
  948         val = 0x100 + reg * 4;
  949         AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
  950             AE_VPD_CAP_ADDR_MASK);
  951         for (i = 0; i < AE_VPD_TIMEOUT; i++) {
  952                 DELAY(2000);
  953                 val = AE_READ_4(sc, AE_VPD_CAP_REG);
  954                 if ((val & AE_VPD_CAP_DONE) != 0)
  955                         break;
  956         }
  957         if (i == AE_VPD_TIMEOUT) {
  958                 device_printf(sc->dev, "timeout reading VPD register %d.\n",
  959                     reg);
  960                 return (ETIMEDOUT);
  961         }
  962         *word = AE_READ_4(sc, AE_VPD_DATA_REG);
  963         return (0);
  964 }
  965 
  966 static int
  967 ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
  968 {
  969         uint32_t word, reg, val;
  970         int error;
  971         int found;
  972         int vpdc;
  973         int i;
  974 
  975         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
  976         KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));
  977 
  978         /*
  979          * Check for EEPROM.
  980          */
  981         error = ae_check_eeprom_present(sc, &vpdc);
  982         if (error != 0)
  983                 return (error);
  984 
  985         /*
  986          * Read the VPD configuration space.
  987          * Each register is prefixed with signature,
  988          * so we can check if it is valid.
  989          */
  990         for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
  991                 error = ae_vpd_read_word(sc, i, &word);
  992                 if (error != 0)
  993                         break;
  994 
  995                 /*
  996                  * Check signature.
  997                  */
  998                 if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
  999                         break;
 1000                 reg = word >> AE_VPD_REG_SHIFT;
 1001                 i++;    /* Move to the next word. */
 1002 
 1003                 if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
 1004                         continue;
 1005 
 1006                 error = ae_vpd_read_word(sc, i, &val);
 1007                 if (error != 0)
 1008                         break;
 1009                 if (reg == AE_EADDR0_REG)
 1010                         eaddr[0] = val;
 1011                 else
 1012                         eaddr[1] = val;
 1013                 found++;
 1014         }
 1015 
 1016         if (found < 2)
 1017                 return (ENOENT);
 1018         
 1019         eaddr[1] &= 0xffff;     /* Only last 2 bytes are used. */
 1020         if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
 1021                 if (bootverbose)
 1022                         device_printf(sc->dev,
 1023                             "VPD ethernet address registers are invalid.\n");
 1024                 return (EINVAL);
 1025         }
 1026         return (0);
 1027 }
 1028 
 1029 static int
 1030 ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
 1031 {
 1032 
 1033         /*
 1034          * BIOS is supposed to set this.
 1035          */
 1036         eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
 1037         eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
 1038         eaddr[1] &= 0xffff;     /* Only last 2 bytes are used. */
 1039 
 1040         if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
 1041                 if (bootverbose)
 1042                         device_printf(sc->dev,
 1043                             "Ethernet address registers are invalid.\n");
 1044                 return (EINVAL);
 1045         }
 1046         return (0);
 1047 }
 1048 
 1049 static void
 1050 ae_retrieve_address(ae_softc_t *sc)
 1051 {
 1052         uint32_t eaddr[2] = {0, 0};
 1053         int error;
 1054 
 1055         /*
 1056          *Check for EEPROM.
 1057          */
 1058         error = ae_get_vpd_eaddr(sc, eaddr);
 1059         if (error != 0)
 1060                 error = ae_get_reg_eaddr(sc, eaddr);
 1061         if (error != 0) {
 1062                 if (bootverbose)
 1063                         device_printf(sc->dev,
 1064                             "Generating random ethernet address.\n");
 1065                 eaddr[0] = arc4random();
 1066 
 1067                 /*
 1068                  * Set OUI to ASUSTek COMPUTER INC.
 1069                  */
 1070                 sc->eaddr[0] = 0x02;    /* U/L bit set. */
 1071                 sc->eaddr[1] = 0x1f;
 1072                 sc->eaddr[2] = 0xc6;
 1073                 sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
 1074                 sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
 1075                 sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
 1076         } else {
 1077                 sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
 1078                 sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
 1079                 sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
 1080                 sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
 1081                 sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
 1082                 sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
 1083         }
 1084 }
 1085 
 1086 static void
 1087 ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 1088 {
 1089         bus_addr_t *addr = arg;
 1090 
 1091         if (error != 0)
 1092                 return;
 1093         KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
 1094             nsegs));
 1095         *addr = segs[0].ds_addr;
 1096 }
 1097 
 1098 static int
 1099 ae_alloc_rings(ae_softc_t *sc)
 1100 {
 1101         bus_addr_t busaddr;
 1102         int error;
 1103 
 1104         /*
 1105          * Create parent DMA tag.
 1106          */
 1107         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
 1108             1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 1109             NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
 1110             BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
 1111             &sc->dma_parent_tag);
 1112         if (error != 0) {
 1113                 device_printf(sc->dev, "could not creare parent DMA tag.\n");
 1114                 return (error);
 1115         }
 1116 
 1117         /*
 1118          * Create DMA tag for TxD.
 1119          */
 1120         error = bus_dma_tag_create(sc->dma_parent_tag,
 1121             8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 1122             NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
 1123             AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
 1124             &sc->dma_txd_tag);
 1125         if (error != 0) {
 1126                 device_printf(sc->dev, "could not creare TxD DMA tag.\n");
 1127                 return (error);
 1128         }
 1129 
 1130         /*
 1131          * Create DMA tag for TxS.
 1132          */
 1133         error = bus_dma_tag_create(sc->dma_parent_tag,
 1134             8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 1135             NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
 1136             AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
 1137             &sc->dma_txs_tag);
 1138         if (error != 0) {
 1139                 device_printf(sc->dev, "could not creare TxS DMA tag.\n");
 1140                 return (error);
 1141         }
 1142 
 1143         /*
 1144          * Create DMA tag for RxD.
 1145          */
 1146         error = bus_dma_tag_create(sc->dma_parent_tag,
 1147             128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 1148             NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 1,
 1149             AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 0, NULL, NULL,
 1150             &sc->dma_rxd_tag);
 1151         if (error != 0) {
 1152                 device_printf(sc->dev, "could not creare TxS DMA tag.\n");
 1153                 return (error);
 1154         }
 1155 
 1156         /*
 1157          * Allocate TxD DMA memory.
 1158          */
 1159         error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
 1160             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1161             &sc->dma_txd_map);
 1162         if (error != 0) {
 1163                 device_printf(sc->dev,
 1164                     "could not allocate DMA memory for TxD ring.\n");
 1165                 return (error);
 1166         }
 1167         error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
 1168             AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
 1169         if (error != 0 || busaddr == 0) {
 1170                 device_printf(sc->dev,
 1171                     "could not load DMA map for TxD ring.\n");
 1172                 return (error);
 1173         }
 1174         sc->dma_txd_busaddr = busaddr;
 1175 
 1176         /*
 1177          * Allocate TxS DMA memory.
 1178          */
 1179         error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
 1180             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1181             &sc->dma_txs_map);
 1182         if (error != 0) {
 1183                 device_printf(sc->dev,
 1184                     "could not allocate DMA memory for TxS ring.\n");
 1185                 return (error);
 1186         }
 1187         error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
 1188             AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
 1189         if (error != 0 || busaddr == 0) {
 1190                 device_printf(sc->dev,
 1191                     "could not load DMA map for TxS ring.\n");
 1192                 return (error);
 1193         }
 1194         sc->dma_txs_busaddr = busaddr;
 1195 
 1196         /*
 1197          * Allocate RxD DMA memory.
 1198          */
 1199         error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
 1200             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1201             &sc->dma_rxd_map);
 1202         if (error != 0) {
 1203                 device_printf(sc->dev,
 1204                     "could not allocate DMA memory for RxD ring.\n");
 1205                 return (error);
 1206         }
 1207         error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
 1208             sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING,
 1209             ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
 1210         if (error != 0 || busaddr == 0) {
 1211                 device_printf(sc->dev,
 1212                     "could not load DMA map for RxD ring.\n");
 1213                 return (error);
 1214         }
 1215         sc->dma_rxd_busaddr = busaddr + AE_RXD_PADDING;
 1216         sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + AE_RXD_PADDING);
 1217 
 1218         return (0);
 1219 }
 1220 
/*
 * Release all ring DMA resources created by ae_alloc_rings().  Safe
 * to call with partially-initialized state: each tag is torn down
 * only if it exists, and always in unload -> free -> destroy order.
 */
static void
ae_dma_free(ae_softc_t *sc)
{

	if (sc->dma_txd_tag != NULL) {
		if (sc->dma_txd_map != NULL) {
			bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
			if (sc->txd_base != NULL)
				bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
				    sc->dma_txd_map);

		}
		bus_dma_tag_destroy(sc->dma_txd_tag);
		/* NULL everything out so a second call is a no-op. */
		sc->dma_txd_map = NULL;
		sc->dma_txd_tag = NULL;
		sc->txd_base = NULL;
	}
	if (sc->dma_txs_tag != NULL) {
		if (sc->dma_txs_map != NULL) {
			bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
			if (sc->txs_base != NULL)
				bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
				    sc->dma_txs_map);

		}
		bus_dma_tag_destroy(sc->dma_txs_tag);
		sc->dma_txs_map = NULL;
		sc->dma_txs_tag = NULL;
		sc->txs_base = NULL;
	}
	if (sc->dma_rxd_tag != NULL) {
		if (sc->dma_rxd_map != NULL) {
			bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
			if (sc->rxd_base_dma != NULL)
				bus_dmamem_free(sc->dma_rxd_tag,
				    sc->rxd_base_dma, sc->dma_rxd_map);

		}
		bus_dma_tag_destroy(sc->dma_rxd_tag);
		sc->dma_rxd_map = NULL;
		sc->dma_rxd_tag = NULL;
		sc->rxd_base_dma = NULL;
	}
	/* Parent tag goes last, after all child tags are destroyed. */
	if (sc->dma_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->dma_parent_tag);
		sc->dma_parent_tag = NULL;
	}
}
 1269 
 1270 static int
 1271 ae_shutdown(device_t dev)
 1272 {
 1273         ae_softc_t *sc;
 1274         int error;
 1275 
 1276         sc = device_get_softc(dev);
 1277         KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
 1278 
 1279         error = ae_suspend(dev);
 1280         AE_LOCK(sc);
 1281         ae_powersave_enable(sc);
 1282         AE_UNLOCK(sc);
 1283         return (error);
 1284 }
 1285 
 1286 static void
 1287 ae_powersave_disable(ae_softc_t *sc)
 1288 {
 1289         uint32_t val;
 1290         
 1291         AE_LOCK_ASSERT(sc);
 1292 
 1293         AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
 1294         val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
 1295         if (val & AE_PHY_DBG_POWERSAVE) {
 1296                 val &= ~AE_PHY_DBG_POWERSAVE;
 1297                 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
 1298                 DELAY(1000);
 1299         }
 1300 }
 1301 
 1302 static void
 1303 ae_powersave_enable(ae_softc_t *sc)
 1304 {
 1305         uint32_t val;
 1306         
 1307         AE_LOCK_ASSERT(sc);
 1308 
 1309         /*
 1310          * XXX magic numbers.
 1311          */
 1312         AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
 1313         val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
 1314         AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
 1315         AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
 1316         AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
 1317         AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
 1318         AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
 1319 }
 1320 
/*
 * Prepare the chip for a power-down: program wake-on-LAN (if the
 * hardware supports power management and WOL is enabled), apply PCIe
 * workarounds and arm PME in PCI config space.  Called with the
 * softc lock held, after ae_stop().
 */
static void
ae_pm_init(ae_softc_t *sc)
{
	struct ifnet *ifp;
	uint32_t val;
	uint16_t pmstat;
	struct mii_data *mii;
	int pmc;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((sc->flags & AE_FLAG_PMG) == 0) {
		/* Disable WOL entirely. */
		AE_WRITE_4(sc, AE_WOL_REG, 0);
		return;
	}

	/*
	 * Configure WOL if enabled.
	 */
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		mii = device_get_softc(sc->miibus);
		mii_pollstat(mii);
		if ((mii->mii_media_status & IFM_AVALID) != 0 &&
		    (mii->mii_media_status & IFM_ACTIVE) != 0) {
			/* Link is up: wake on magic packet. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \
			    AE_WOL_MAGIC_PME);

			/*
			 * Configure MAC.
			 */
			val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \
			    AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \
			    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \
			    AE_HALFBUF_MASK) | \
			    ((AE_MAC_PREAMBLE_DEFAULT << \
			    AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \
			    AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
			if ((IFM_OPTIONS(mii->mii_media_active) & \
			    IFM_FDX) != 0)
				val |= AE_MAC_FULL_DUPLEX;
			AE_WRITE_4(sc, AE_MAC_REG, val);
			    
		} else {	/* No link. */
			/* Wake on link change instead; MAC stays off. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \
			    AE_WOL_LNKCHG_PME);
			AE_WRITE_4(sc, AE_MAC_REG, 0);
		}
	} else {
		/* WOL disabled: just let the PHY sleep. */
		ae_powersave_enable(sc);
	}

	/*
	 * PCIE hacks. Magic numbers.
	 */
	val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
	val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
	AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
	val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
	val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);

	/*
	 * Configure PME.
	 */
	if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
		/* Clear stale PME status; enable PME only when WOL is on. */
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((ifp->if_capenable & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
}
 1395 
 1396 static int
 1397 ae_suspend(device_t dev)
 1398 {
 1399         ae_softc_t *sc;
 1400 
 1401         sc = device_get_softc(dev);
 1402 
 1403         AE_LOCK(sc);
 1404         ae_stop(sc);
 1405         ae_pm_init(sc);
 1406         AE_UNLOCK(sc);
 1407 
 1408         return (0);
 1409 }
 1410 
 1411 static int
 1412 ae_resume(device_t dev)
 1413 {
 1414         ae_softc_t *sc;
 1415 
 1416         sc = device_get_softc(dev);
 1417         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
 1418 
 1419         AE_LOCK(sc);
 1420         AE_READ_4(sc, AE_WOL_REG);      /* Clear WOL status. */
 1421         if ((sc->ifp->if_flags & IFF_UP) != 0)
 1422                 ae_init_locked(sc);
 1423         AE_UNLOCK(sc);
 1424 
 1425         return (0);
 1426 }
 1427 
 1428 static unsigned int
 1429 ae_tx_avail_size(ae_softc_t *sc)
 1430 {
 1431         unsigned int avail;
 1432         
 1433         if (sc->txd_cur >= sc->txd_ack)
 1434                 avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
 1435         else
 1436                 avail = sc->txd_ack - sc->txd_cur;
 1437 
 1438         return (avail);
 1439 }
 1440 
/*
 * Copy one outgoing packet into the Tx descriptor ring.  The ring is
 * a byte buffer: each packet is an ae_txd_t header followed by the
 * frame data, padded to a 4-byte boundary, wrapping at
 * AE_TXD_BUFSIZE_DEFAULT.  Returns 0 on success or ENOBUFS when the
 * ring (or the TxS status ring) has no room.  The mbuf is NOT freed
 * here; the caller still owns *m_head.
 */
static int
ae_encap(ae_softc_t *sc, struct mbuf **m_head)
{
	struct mbuf *m0;
	ae_txd_t *hdr;
	unsigned int to_end;
	uint16_t len;

	AE_LOCK_ASSERT(sc);

	m0 = *m_head;
	len = m0->m_pkthdr.len;
	
	/* Header + data + up to 3 bytes of alignment padding must fit. */
	if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
	    len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
#ifdef AE_DEBUG
		if_printf(sc->ifp, "No free Tx available.\n");
#endif
		return ENOBUFS;
	}

	hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
	bzero(hdr, sizeof(*hdr));
	/* Skip header size. */
	sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
	/* Space available to the end of the ring */
	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
	if (to_end >= len) {
		m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
	} else {
		/* Packet wraps: copy the tail piece to the ring start. */
		m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
		    sc->txd_cur));
		m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
	}

	/*
	 * Set TxD flags and parameters.
	 */
	if ((m0->m_flags & M_VLANTAG) != 0) {
		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
	} else {
		hdr->len = htole16(len);
	}

	/*
	 * Set current TxD position and round up to a 4-byte boundary.
	 */
	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
	if (sc->txd_cur == sc->txd_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
	if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
#endif

	/*
	 * Update TxS position and check if there are empty TxS available.
	 */
	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
	if (sc->txs_cur == sc->txs_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;

	/*
	 * Synchronize DMA memory.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
 1514 
 1515 static void
 1516 ae_start(struct ifnet *ifp)
 1517 {
 1518         ae_softc_t *sc;
 1519 
 1520         sc = ifp->if_softc;
 1521         AE_LOCK(sc);
 1522         ae_start_locked(ifp);
 1523         AE_UNLOCK(sc);
 1524 }
 1525 
 1526 static void
 1527 ae_start_locked(struct ifnet *ifp)
 1528 {
 1529         ae_softc_t *sc;
 1530         unsigned int count;
 1531         struct mbuf *m0;
 1532         int error;
 1533 
 1534         sc = ifp->if_softc;
 1535         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
 1536         AE_LOCK_ASSERT(sc);
 1537 
 1538 #ifdef AE_DEBUG
 1539         if_printf(ifp, "Start called.\n");
 1540 #endif
 1541 
 1542         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1543             IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
 1544                 return;
 1545 
 1546         count = 0;
 1547         while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 1548                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
 1549                 if (m0 == NULL)
 1550                         break;  /* Nothing to do. */
 1551 
 1552                 error = ae_encap(sc, &m0);
 1553                 if (error != 0) {
 1554                         if (m0 != NULL) {
 1555                                 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
 1556                                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1557 #ifdef AE_DEBUG
 1558                                 if_printf(ifp, "Setting OACTIVE.\n");
 1559 #endif
 1560                         }
 1561                         break;
 1562                 }
 1563                 count++;
 1564                 sc->tx_inproc++;
 1565 
 1566                 /* Bounce a copy of the frame to BPF. */
 1567                 ETHER_BPF_MTAP(ifp, m0);
 1568 
 1569                 m_freem(m0);
 1570         }
 1571 
 1572         if (count > 0) {        /* Something was dequeued. */
 1573                 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
 1574                 sc->wd_timer = AE_TX_TIMEOUT;   /* Load watchdog. */
 1575 #ifdef AE_DEBUG
 1576                 if_printf(ifp, "%d packets dequeued.\n", count);
 1577                 if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
 1578 #endif
 1579         }
 1580 }
 1581 
/*
 * Deferred link-state handler, queued by ae_miibus_statchg().  Reads
 * the current MII state, updates AE_FLAG_LINK, and restarts the
 * MAC/DMA engines when a usable (10/100) link is present.
 */
static void
ae_link_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);

	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		AE_UNLOCK(sc);	/* XXX: could happen? */
		return;
	}
	
	/* Recompute the link flag from the current media status. */
	sc->flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch(IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	if ((sc->flags & AE_FLAG_LINK) != 0) {
		/* Reprogram the MAC for the new duplex/speed settings. */
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
	AE_UNLOCK(sc);
}
 1639 
 1640 static void
 1641 ae_stop_rxmac(ae_softc_t *sc)
 1642 {
 1643         uint32_t val;
 1644         int i;
 1645 
 1646         AE_LOCK_ASSERT(sc);
 1647 
 1648         /*
 1649          * Stop Rx MAC engine.
 1650          */
 1651         val = AE_READ_4(sc, AE_MAC_REG);
 1652         if ((val & AE_MAC_RX_EN) != 0) {
 1653                 val &= ~AE_MAC_RX_EN;
 1654                 AE_WRITE_4(sc, AE_MAC_REG, val);
 1655         }
 1656 
 1657         /*
 1658          * Stop Rx DMA engine.
 1659          */
 1660         if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
 1661                 AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
 1662 
 1663         /*
 1664          * Wait for IDLE state.
 1665          */
 1666         for (i = 0; i < AE_IDLE_TIMEOUT; i--) {
 1667                 val = AE_READ_4(sc, AE_IDLE_REG);
 1668                 if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
 1669                         break;
 1670                 DELAY(100);
 1671         }
 1672         if (i == AE_IDLE_TIMEOUT)
 1673                 device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
 1674 }
 1675 
 1676 static void
 1677 ae_stop_txmac(ae_softc_t *sc)
 1678 {
 1679         uint32_t val;
 1680         int i;
 1681 
 1682         AE_LOCK_ASSERT(sc);
 1683 
 1684         /*
 1685          * Stop Tx MAC engine.
 1686          */
 1687         val = AE_READ_4(sc, AE_MAC_REG);
 1688         if ((val & AE_MAC_TX_EN) != 0) {
 1689                 val &= ~AE_MAC_TX_EN;
 1690                 AE_WRITE_4(sc, AE_MAC_REG, val);
 1691         }
 1692 
 1693         /*
 1694          * Stop Tx DMA engine.
 1695          */
 1696         if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
 1697                 AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
 1698 
 1699         /*
 1700          * Wait for IDLE state.
 1701          */
 1702         for (i = 0; i < AE_IDLE_TIMEOUT; i--) {
 1703                 val = AE_READ_4(sc, AE_IDLE_REG);
 1704                 if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
 1705                         break;
 1706                 DELAY(100);
 1707         }
 1708         if (i == AE_IDLE_TIMEOUT)
 1709                 device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
 1710 }
 1711 
 1712 static void
 1713 ae_mac_config(ae_softc_t *sc)
 1714 {
 1715         struct mii_data *mii;
 1716         uint32_t val;
 1717 
 1718         AE_LOCK_ASSERT(sc);
 1719 
 1720         mii = device_get_softc(sc->miibus);
 1721         val = AE_READ_4(sc, AE_MAC_REG);
 1722         val &= ~AE_MAC_FULL_DUPLEX;
 1723         /* XXX disable AE_MAC_TX_FLOW_EN? */
 1724 
 1725         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
 1726                 val |= AE_MAC_FULL_DUPLEX;
 1727 
 1728         AE_WRITE_4(sc, AE_MAC_REG, val);
 1729 }
 1730 
 1731 static int
 1732 ae_intr(void *arg)
 1733 {
 1734         ae_softc_t *sc;
 1735         uint32_t val;
 1736 
 1737         sc = (ae_softc_t *)arg;
 1738         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
 1739 
 1740         val = AE_READ_4(sc, AE_ISR_REG);
 1741         if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
 1742                 return (FILTER_STRAY);
 1743 
 1744         /* Disable interrupts. */
 1745         AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
 1746 
 1747         /* Schedule interrupt processing. */
 1748         taskqueue_enqueue(sc->tq, &sc->int_task);
 1749 
 1750         return (FILTER_HANDLED);
 1751 }
 1752 
/*
 * Deferred interrupt handler, run from the driver taskqueue after
 * ae_intr() masked device interrupts.  Acks the pending events,
 * reinitializes the chip on fatal DMA/link errors, dispatches Tx/Rx
 * completion processing, and re-enables interrupts when done.
 * 'pending' is the taskqueue pending count; it is not used here.
 */
static void
ae_int_task(void *arg, int pending)
{
        ae_softc_t *sc;
        struct ifnet *ifp;
        uint32_t val;

        sc = (ae_softc_t *)arg;

        AE_LOCK(sc);

        ifp = sc->ifp;

        val = AE_READ_4(sc, AE_ISR_REG);        /* Read interrupt status. */
        if (val == 0) {
                /* Nothing pending by the time we got here. */
                AE_UNLOCK(sc);
                return;
        }

        /*
         * Clear interrupts and disable them.
         */
        AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

#ifdef AE_DEBUG
        if_printf(ifp, "Interrupt received: 0x%08x\n", val);
#endif

        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                /*
                 * DMA timeouts and link loss are treated as fatal:
                 * reset and reinitialize the whole chip.
                 */
                if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
                    AE_ISR_PHY_LINKDOWN)) != 0) {
                        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                        ae_init_locked(sc);
                        AE_UNLOCK(sc);
                        return;
                }
                if ((val & AE_ISR_TX_EVENT) != 0)
                        ae_tx_intr(sc);
                if ((val & AE_ISR_RX_EVENT) != 0)
                        ae_rx_intr(sc);
                /*
                 * Re-enable interrupts.
                 */
                AE_WRITE_4(sc, AE_ISR_REG, 0);

                /* Tx completions may have freed ring space: restart output. */
                if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
                        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                                ae_start_locked(ifp);
                }
        }

        AE_UNLOCK(sc);
}
 1806 
/*
 * Tx completion processing.  Walks the Tx status (TxS) ring from
 * txs_ack, retiring each completed entry: updates statistics, advances
 * the matching Tx descriptor ack index (txd_ack) past the transmitted
 * frame, and counts the packet as sent or errored.  Called from
 * ae_int_task() with the softc lock held.
 */
static void
ae_tx_intr(ae_softc_t *sc)
{
        struct ifnet *ifp;
        ae_txd_t *txd;
        ae_txs_t *txs;
        uint16_t flags;

        AE_LOCK_ASSERT(sc);

        ifp = sc->ifp;

#ifdef AE_DEBUG
        if_printf(ifp, "Tx interrupt occuried.\n");
#endif

        /*
         * Syncronize DMA buffers.
         */
        bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        for (;;) {
                txs = sc->txs_base + sc->txs_ack;
                flags = le16toh(txs->flags);
                /* Stop at the first entry hardware has not updated yet. */
                if ((flags & AE_TXS_UPDATE) == 0)
                        break;
                /* Clear UPDATE so the entry is not processed twice. */
                txs->flags = htole16(flags & ~AE_TXS_UPDATE);
                /* Update stats. */
                ae_update_stats_tx(flags, &sc->stats);

                /*
                 * Update TxS position.
                 */
                sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
                sc->flags |= AE_FLAG_TXAVAIL;

                txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
                /* Lengths are compared raw (both little-endian). */
                if (txs->len != txd->len)
                        device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
                            le16toh(txs->len), le16toh(txd->len));

                /*
                 * Move txd ack and align on 4-byte boundary.
                 */
                sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
                    sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;

                if ((flags & AE_TXS_SUCCESS) != 0)
                        ifp->if_opackets++;
                else
                        ifp->if_oerrors++;

                sc->tx_inproc--;
        }

        if ((sc->flags & AE_FLAG_TXAVAIL) != 0)
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        /* More completions than submissions: clamp and warn. */
        if (sc->tx_inproc < 0) {
                if_printf(ifp, "Received stray Tx interrupt(s).\n");
                sc->tx_inproc = 0;
        }

        if (sc->tx_inproc == 0)
                sc->wd_timer = 0;       /* Unarm watchdog. */

        /*
         * Syncronize DMA buffers.
         */
        bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
 1883 
/*
 * Hand one received frame in descriptor 'rxd' up the network stack.
 * Copies the frame into a freshly-allocated mbuf chain (the Rx ring
 * buffers are reused by hardware), attaches a VLAN tag when hardware
 * stripped one, and calls if_input with the softc lock dropped.
 * Called from ae_rx_intr() with the softc lock held.
 */
static void
ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
{
        struct ifnet *ifp;
        struct mbuf *m;
        unsigned int size;
        uint16_t flags;

        AE_LOCK_ASSERT(sc);

        ifp = sc->ifp;
        flags = le16toh(rxd->flags);

#ifdef AE_DEBUG
        if_printf(ifp, "Rx interrupt occuried.\n");
#endif
        /* Hardware length includes the CRC; strip it. */
        size = le16toh(rxd->len) - ETHER_CRC_LEN;
        if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
                if_printf(ifp, "Runt frame received.");
                ifp->if_ierrors++;
                return;
        }

        /* Copy out of the DMA buffer; ETHER_ALIGN keeps the IP header aligned. */
        m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
        if (m == NULL) {
                /* mbuf shortage: account the drop and move on. */
                ifp->if_iqdrops++;
                return;
        }

        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
            (flags & AE_RXD_HAS_VLAN) != 0) {
                m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
                m->m_flags |= M_VLANTAG;
        }

        ifp->if_ipackets++;
        /*
         * Pass it through.  The lock is dropped because if_input may
         * sleep or re-enter the driver.
         */
        AE_UNLOCK(sc);
        (*ifp->if_input)(ifp, m);
        AE_LOCK(sc);
}
 1927 
/*
 * Rx completion processing.  Walks the Rx descriptor ring from
 * rxd_cur, passing good frames to ae_rxeof() and counting bad ones,
 * then tells the hardware how far we have consumed.  Called from
 * ae_int_task() with the softc lock held.
 */
static void
ae_rx_intr(ae_softc_t *sc)
{
        ae_rxd_t *rxd;
        struct ifnet *ifp;
        uint16_t flags;
        int count;

        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

        AE_LOCK_ASSERT(sc);

        ifp = sc->ifp;

        /*
         * Syncronize DMA buffers.
         */
        bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        for (count = 0;; count++) {
                rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
                flags = le16toh(rxd->flags);
                /* Stop at the first descriptor hardware has not filled. */
                if ((flags & AE_RXD_UPDATE) == 0)
                        break;
                /* Clear UPDATE so the descriptor can be reused. */
                rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
                /* Update stats. */
                ae_update_stats_rx(flags, &sc->stats);

                /*
                 * Update position index.
                 */
                sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

                if ((flags & AE_RXD_SUCCESS) != 0)
                        ae_rxeof(sc, rxd);
                else
                        ifp->if_ierrors++;
        }

        if (count > 0) {
                bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                /*
                 * Update Rx index.
                 */
                AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
        }
}
 1977 
 1978 static void
 1979 ae_watchdog(ae_softc_t *sc)
 1980 {
 1981         struct ifnet *ifp;
 1982 
 1983         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
 1984         AE_LOCK_ASSERT(sc);
 1985         ifp = sc->ifp;
 1986 
 1987         if (sc->wd_timer == 0 || --sc->wd_timer != 0)
 1988                 return;         /* Noting to do. */
 1989 
 1990         if ((sc->flags & AE_FLAG_LINK) == 0)
 1991                 if_printf(ifp, "watchdog timeout (missed link).\n");
 1992         else
 1993                 if_printf(ifp, "watchdog timeout - resetting.\n");
 1994 
 1995         ifp->if_oerrors++;
 1996         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1997         ae_init_locked(sc);
 1998         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1999                 ae_start_locked(ifp);
 2000 }
 2001 
 2002 static void
 2003 ae_tick(void *arg)
 2004 {
 2005         ae_softc_t *sc;
 2006         struct mii_data *mii;
 2007 
 2008         sc = (ae_softc_t *)arg;
 2009         KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
 2010         AE_LOCK_ASSERT(sc);
 2011 
 2012         mii = device_get_softc(sc->miibus);
 2013         mii_tick(mii);
 2014         ae_watchdog(sc);        /* Watchdog check. */
 2015         callout_reset(&sc->tick_ch, hz, ae_tick, sc);
 2016 }
 2017 
 2018 static void
 2019 ae_rxvlan(ae_softc_t *sc)
 2020 {
 2021         struct ifnet *ifp;
 2022         uint32_t val;
 2023 
 2024         AE_LOCK_ASSERT(sc);
 2025         ifp = sc->ifp;
 2026         val = AE_READ_4(sc, AE_MAC_REG);
 2027         val &= ~AE_MAC_RMVLAN_EN;
 2028         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 2029                 val |= AE_MAC_RMVLAN_EN;
 2030         AE_WRITE_4(sc, AE_MAC_REG, val);
 2031 }
 2032 
/*
 * Program the hardware receive filter: broadcast/promiscuous/
 * all-multicast mode bits and the 64-bit multicast hash table,
 * derived from the interface flags and multicast address list.
 * Called with the softc lock held.
 */
static void
ae_rxfilter(ae_softc_t *sc)
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t crc;
        uint32_t mchash[2];
        uint32_t rxcfg;

        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

        AE_LOCK_ASSERT(sc);

        ifp = sc->ifp;

        rxcfg = AE_READ_4(sc, AE_MAC_REG);
        rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);

        if ((ifp->if_flags & IFF_BROADCAST) != 0)
                rxcfg |= AE_MAC_BCAST_EN;
        if ((ifp->if_flags & IFF_PROMISC) != 0)
                rxcfg |= AE_MAC_PROMISC_EN;
        if ((ifp->if_flags & IFF_ALLMULTI) != 0)
                rxcfg |= AE_MAC_MCAST_EN;

        /*
         * Wipe old settings.
         */
        AE_WRITE_4(sc, AE_REG_MHT0, 0);
        AE_WRITE_4(sc, AE_REG_MHT1, 0);
        if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
                /* Accept everything: fill the hash table completely. */
                AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
                AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
                AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
                return;
        }

        /*
         * Load multicast tables.  The top bit of the CRC selects the
         * 32-bit table half, the next five bits select the bit in it.
         */
        bzero(mchash, sizeof(mchash));
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
                        ifma->ifma_addr), ETHER_ADDR_LEN);
                mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
        }
        if_maddr_runlock(ifp);
        AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
        AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
        AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
}
 2087 
/*
 * Interface ioctl handler.  Handles MTU changes, flag changes
 * (up/down, promiscuous/allmulti), multicast list updates, media
 * selection (delegated to ifmedia/MII), and capability toggles;
 * everything else falls through to ether_ioctl().  Takes the softc
 * lock only around hardware-touching cases.  Returns 0 or an errno.
 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct ae_softc *sc;
        struct ifreq *ifr;
        struct mii_data *mii;
        int error, mask;

        sc = ifp->if_softc;
        ifr = (struct ifreq *)data;
        error = 0;

        switch (cmd) {
        case SIOCSIFMTU:
                if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
                        error = EINVAL;
                else if (ifp->if_mtu != ifr->ifr_mtu) {
                        AE_LOCK(sc);
                        ifp->if_mtu = ifr->ifr_mtu;
                        /* Reinitialize so the hardware picks up the new MTU. */
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                                ae_init_locked(sc);
                        }
                        AE_UNLOCK(sc);
                }
                break;
        case SIOCSIFFLAGS:
                AE_LOCK(sc);
                if ((ifp->if_flags & IFF_UP) != 0) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                                /*
                                 * Already running: only reprogram the Rx
                                 * filter if promisc/allmulti changed.
                                 */
                                if (((ifp->if_flags ^ sc->if_flags)
                                    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                                        ae_rxfilter(sc);
                        } else {
                                /* Bring the interface up unless detaching. */
                                if ((sc->flags & AE_FLAG_DETACH) == 0)
                                        ae_init_locked(sc);
                        }
                } else {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                                ae_stop(sc);
                }
                /* Remember flags to detect changes on the next call. */
                sc->if_flags = ifp->if_flags;
                AE_UNLOCK(sc);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                AE_LOCK(sc);
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                        ae_rxfilter(sc);
                AE_UNLOCK(sc);
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                mii = device_get_softc(sc->miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
                break;
        case SIOCSIFCAP:
                AE_LOCK(sc);
                /* 'mask' holds the capability bits being toggled. */
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
                    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        ae_rxvlan(sc);
                }
                VLAN_CAPABILITIES(ifp);
                AE_UNLOCK(sc);
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }
        return (error);
}
 2161 
/*
 * Stop the interface: mark it down, cancel the tick callout and
 * watchdog, mask interrupts, halt the Rx/Tx MACs and DMA engines,
 * and wait for the chip to report full idle.  Called with the softc
 * lock held.
 */
static void
ae_stop(ae_softc_t *sc)
{
        struct ifnet *ifp;
        int i;

        AE_LOCK_ASSERT(sc);

        ifp = sc->ifp;
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
        sc->flags &= ~AE_FLAG_LINK;
        sc->wd_timer = 0;       /* Cancel watchdog. */
        callout_stop(&sc->tick_ch);

        /*
         * Clear and disable interrupts.
         */
        AE_WRITE_4(sc, AE_IMR_REG, 0);
        AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

        /*
         * Stop Rx/Tx MACs.
         */
        ae_stop_txmac(sc);
        ae_stop_rxmac(sc);

        /*
         * Stop DMA engines.
         */
        AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
        AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

        /*
         * Wait for everything to enter idle state.
         * Poll in 100us steps up to AE_IDLE_TIMEOUT iterations.
         */
        for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
                if (AE_READ_4(sc, AE_IDLE_REG) == 0)
                        break;
                DELAY(100);
        }
        if (i == AE_IDLE_TIMEOUT)
                device_printf(sc->dev, "could not enter idle state in stop.\n");
}
 2205 
 2206 static void
 2207 ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
 2208 {
 2209 
 2210         if ((flags & AE_TXS_BCAST) != 0)
 2211                 stats->tx_bcast++;
 2212         if ((flags & AE_TXS_MCAST) != 0)
 2213                 stats->tx_mcast++;
 2214         if ((flags & AE_TXS_PAUSE) != 0)
 2215                 stats->tx_pause++;
 2216         if ((flags & AE_TXS_CTRL) != 0)
 2217                 stats->tx_ctrl++;
 2218         if ((flags & AE_TXS_DEFER) != 0)
 2219                 stats->tx_defer++;
 2220         if ((flags & AE_TXS_EXCDEFER) != 0)
 2221                 stats->tx_excdefer++;
 2222         if ((flags & AE_TXS_SINGLECOL) != 0)
 2223                 stats->tx_singlecol++;
 2224         if ((flags & AE_TXS_MULTICOL) != 0)
 2225                 stats->tx_multicol++;
 2226         if ((flags & AE_TXS_LATECOL) != 0)
 2227                 stats->tx_latecol++;
 2228         if ((flags & AE_TXS_ABORTCOL) != 0)
 2229                 stats->tx_abortcol++;
 2230         if ((flags & AE_TXS_UNDERRUN) != 0)
 2231                 stats->tx_underrun++;
 2232 }
 2233 
 2234 static void
 2235 ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
 2236 {
 2237 
 2238         if ((flags & AE_RXD_BCAST) != 0)
 2239                 stats->rx_bcast++;
 2240         if ((flags & AE_RXD_MCAST) != 0)
 2241                 stats->rx_mcast++;
 2242         if ((flags & AE_RXD_PAUSE) != 0)
 2243                 stats->rx_pause++;
 2244         if ((flags & AE_RXD_CTRL) != 0)
 2245                 stats->rx_ctrl++;
 2246         if ((flags & AE_RXD_CRCERR) != 0)
 2247                 stats->rx_crcerr++;
 2248         if ((flags & AE_RXD_CODEERR) != 0)
 2249                 stats->rx_codeerr++;
 2250         if ((flags & AE_RXD_RUNT) != 0)
 2251                 stats->rx_runt++;
 2252         if ((flags & AE_RXD_FRAG) != 0)
 2253                 stats->rx_frag++;
 2254         if ((flags & AE_RXD_TRUNC) != 0)
 2255                 stats->rx_trunc++;
 2256         if ((flags & AE_RXD_ALIGN) != 0)
 2257                 stats->rx_align++;
 2258 }

Cache object: 17a96cf195276ff94d9c6af5bfd5465e


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.