FreeBSD/Linux Kernel Cross Reference
sys/dev/age/if_age.c


    1 /*-
    2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice unmodified, this list of conditions, and the following
   10  *    disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/8.3/sys/dev/age/if_age.c 229058 2011-12-31 01:08:31Z yongari $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/bus.h>
   36 #include <sys/endian.h>
   37 #include <sys/kernel.h>
   38 #include <sys/malloc.h>
   39 #include <sys/mbuf.h>
   40 #include <sys/rman.h>
   41 #include <sys/module.h>
   42 #include <sys/queue.h>
   43 #include <sys/socket.h>
   44 #include <sys/sockio.h>
   45 #include <sys/sysctl.h>
   46 #include <sys/taskqueue.h>
   47 
   48 #include <net/bpf.h>
   49 #include <net/if.h>
   50 #include <net/if_arp.h>
   51 #include <net/ethernet.h>
   52 #include <net/if_dl.h>
   53 #include <net/if_media.h>
   54 #include <net/if_types.h>
   55 #include <net/if_vlan_var.h>
   56 
   57 #include <netinet/in.h>
   58 #include <netinet/in_systm.h>
   59 #include <netinet/ip.h>
   60 #include <netinet/tcp.h>
   61 
   62 #include <dev/mii/mii.h>
   63 #include <dev/mii/miivar.h>
   64 
   65 #include <dev/pci/pcireg.h>
   66 #include <dev/pci/pcivar.h>
   67 
   68 #include <machine/bus.h>
   69 #include <machine/in_cksum.h>
   70 
   71 #include <dev/age/if_agereg.h>
   72 #include <dev/age/if_agevar.h>
   73 
   74 /* "device miibus" required.  See GENERIC if you get errors here. */
   75 #include "miibus_if.h"
   76 
   77 #define AGE_CSUM_FEATURES       (CSUM_TCP | CSUM_UDP)
   78 
   79 MODULE_DEPEND(age, pci, 1, 1, 1);
   80 MODULE_DEPEND(age, ether, 1, 1, 1);
   81 MODULE_DEPEND(age, miibus, 1, 1, 1);
   82 
   83 /* Tunables. */
   84 static int msi_disable = 0;
   85 static int msix_disable = 0;
   86 TUNABLE_INT("hw.age.msi_disable", &msi_disable);
   87 TUNABLE_INT("hw.age.msix_disable", &msix_disable);
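      /*
       * Both tunables are read from the kernel environment when the
       * module is loaded, so they can be set from /boot/loader.conf;
       * e.g. to force the driver back onto a legacy INTx interrupt:
       *
       *      hw.age.msi_disable="1"
       *      hw.age.msix_disable="1"
       */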
   88 
   89 /*
   90  * Devices supported by this driver.
   91  */
   92 static struct age_dev {
   93         uint16_t        age_vendorid;
   94         uint16_t        age_deviceid;
   95         const char      *age_name;
   96 } age_devs[] = {
   97         { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1,
   98             "Attansic Technology Corp, L1 Gigabit Ethernet" },
   99 };
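      /*
       * age_probe() below walks this table matching on the PCI
       * vendor/device ID pair, so additional L1 variants could be
       * supported by appending entries here.
       */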
  100 
  101 static int age_miibus_readreg(device_t, int, int);
  102 static int age_miibus_writereg(device_t, int, int, int);
  103 static void age_miibus_statchg(device_t);
  104 static void age_mediastatus(struct ifnet *, struct ifmediareq *);
  105 static int age_mediachange(struct ifnet *);
  106 static int age_probe(device_t);
  107 static void age_get_macaddr(struct age_softc *);
  108 static void age_phy_reset(struct age_softc *);
  109 static int age_attach(device_t);
  110 static int age_detach(device_t);
  111 static void age_sysctl_node(struct age_softc *);
  112 static void age_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  113 static int age_check_boundary(struct age_softc *);
  114 static int age_dma_alloc(struct age_softc *);
  115 static void age_dma_free(struct age_softc *);
  116 static int age_shutdown(device_t);
  117 static void age_setwol(struct age_softc *);
  118 static int age_suspend(device_t);
  119 static int age_resume(device_t);
  120 static int age_encap(struct age_softc *, struct mbuf **);
  121 static void age_start(struct ifnet *);
  122 static void age_start_locked(struct ifnet *);
  123 static void age_watchdog(struct age_softc *);
  124 static int age_ioctl(struct ifnet *, u_long, caddr_t);
  125 static void age_mac_config(struct age_softc *);
  126 static void age_link_task(void *, int);
  127 static void age_stats_update(struct age_softc *);
  128 static int age_intr(void *);
  129 static void age_int_task(void *, int);
  130 static void age_txintr(struct age_softc *, int);
  131 static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
  132 static int age_rxintr(struct age_softc *, int, int);
  133 static void age_tick(void *);
  134 static void age_reset(struct age_softc *);
  135 static void age_init(void *);
  136 static void age_init_locked(struct age_softc *);
  137 static void age_stop(struct age_softc *);
  138 static void age_stop_txmac(struct age_softc *);
  139 static void age_stop_rxmac(struct age_softc *);
  140 static void age_init_tx_ring(struct age_softc *);
  141 static int age_init_rx_ring(struct age_softc *);
  142 static void age_init_rr_ring(struct age_softc *);
  143 static void age_init_cmb_block(struct age_softc *);
  144 static void age_init_smb_block(struct age_softc *);
  145 static int age_newbuf(struct age_softc *, struct age_rxdesc *);
  146 static void age_rxvlan(struct age_softc *);
  147 static void age_rxfilter(struct age_softc *);
  148 static int sysctl_age_stats(SYSCTL_HANDLER_ARGS);
  149 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
  150 static int sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS);
  151 static int sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS);
  152 
  153 
  154 static device_method_t age_methods[] = {
  155         /* Device interface. */
  156         DEVMETHOD(device_probe,         age_probe),
  157         DEVMETHOD(device_attach,        age_attach),
  158         DEVMETHOD(device_detach,        age_detach),
  159         DEVMETHOD(device_shutdown,      age_shutdown),
  160         DEVMETHOD(device_suspend,       age_suspend),
  161         DEVMETHOD(device_resume,        age_resume),
  162 
  163         /* MII interface. */
  164         DEVMETHOD(miibus_readreg,       age_miibus_readreg),
  165         DEVMETHOD(miibus_writereg,      age_miibus_writereg),
  166         DEVMETHOD(miibus_statchg,       age_miibus_statchg),
  167 
  168         { NULL, NULL }
  169 };
  170 
  171 static driver_t age_driver = {
  172         "age",
  173         age_methods,
  174         sizeof(struct age_softc)
  175 };
  176 
  177 static devclass_t age_devclass;
  178 
  179 DRIVER_MODULE(age, pci, age_driver, age_devclass, 0, 0);
  180 DRIVER_MODULE(miibus, age, miibus_driver, miibus_devclass, 0, 0);
  181 
  182 static struct resource_spec age_res_spec_mem[] = {
  183         { SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
  184         { -1,                   0,              0 }
  185 };
  186 
  187 static struct resource_spec age_irq_spec_legacy[] = {
  188         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  189         { -1,                   0,              0 }
  190 };
  191 
  192 static struct resource_spec age_irq_spec_msi[] = {
  193         { SYS_RES_IRQ,          1,              RF_ACTIVE },
  194         { -1,                   0,              0 }
  195 };
  196 
  197 static struct resource_spec age_irq_spec_msix[] = {
  198         { SYS_RES_IRQ,          1,              RF_ACTIVE },
  199         { -1,                   0,              0 }
  200 };
  201 
  202 /*
  203  *      Read a PHY register on the MII of the L1.
  204  */
  205 static int
  206 age_miibus_readreg(device_t dev, int phy, int reg)
  207 {
  208         struct age_softc *sc;
  209         uint32_t v;
  210         int i;
  211 
  212         sc = device_get_softc(dev);
  213 
  214         CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
  215             MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
  216         for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
  217                 DELAY(1);
  218                 v = CSR_READ_4(sc, AGE_MDIO);
  219                 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
  220                         break;
  221         }
  222 
  223         if (i == 0) {
  224                 device_printf(sc->age_dev, "phy read timeout : %d\n", reg);
  225                 return (0);
  226         }
  227 
  228         return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
  229 }
  230 
  231 /*
  232  *      Write a PHY register on the MII of the L1.
  233  */
  234 static int
  235 age_miibus_writereg(device_t dev, int phy, int reg, int val)
  236 {
  237         struct age_softc *sc;
  238         uint32_t v;
  239         int i;
  240 
  241         sc = device_get_softc(dev);
  242 
  243         CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
  244             (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
  245             MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
  246         for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
  247                 DELAY(1);
  248                 v = CSR_READ_4(sc, AGE_MDIO);
  249                 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
  250                         break;
  251         }
  252 
  253         if (i == 0)
  254                 device_printf(sc->age_dev, "phy write timeout : %d\n", reg);
  255 
  256         return (0);
  257 }
  258 
  259 /*
  260  *      Callback from MII layer when media changes.
  261  */
  262 static void
  263 age_miibus_statchg(device_t dev)
  264 {
  265         struct age_softc *sc;
  266 
  267         sc = device_get_softc(dev);
  268         taskqueue_enqueue(taskqueue_swi, &sc->age_link_task);
  269 }
  270 
  271 /*
  272  *      Get the current interface media status.
  273  */
  274 static void
  275 age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
  276 {
  277         struct age_softc *sc;
  278         struct mii_data *mii;
  279 
  280         sc = ifp->if_softc;
  281         AGE_LOCK(sc);
  282         mii = device_get_softc(sc->age_miibus);
  283 
  284         mii_pollstat(mii);
  285         ifmr->ifm_status = mii->mii_media_status;
  286         ifmr->ifm_active = mii->mii_media_active;
  287         AGE_UNLOCK(sc);
  288 }
  289 
  290 /*
  291  *      Set hardware to newly-selected media.
  292  */
  293 static int
  294 age_mediachange(struct ifnet *ifp)
  295 {
  296         struct age_softc *sc;
  297         struct mii_data *mii;
  298         struct mii_softc *miisc;
  299         int error;
  300 
  301         sc = ifp->if_softc;
  302         AGE_LOCK(sc);
  303         mii = device_get_softc(sc->age_miibus);
  304         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
  305                 mii_phy_reset(miisc);
  306         error = mii_mediachg(mii);
  307         AGE_UNLOCK(sc);
  308 
  309         return (error);
  310 }
  311 
  312 static int
  313 age_probe(device_t dev)
  314 {
  315         struct age_dev *sp;
  316         int i;
  317         uint16_t vendor, devid;
  318 
  319         vendor = pci_get_vendor(dev);
  320         devid = pci_get_device(dev);
  321         sp = age_devs;
  322         for (i = 0; i < sizeof(age_devs) / sizeof(age_devs[0]);
  323             i++, sp++) {
  324                 if (vendor == sp->age_vendorid &&
  325                     devid == sp->age_deviceid) {
  326                         device_set_desc(dev, sp->age_name);
  327                         return (BUS_PROBE_DEFAULT);
  328                 }
  329         }
  330 
  331         return (ENXIO);
  332 }
  333 
  334 static void
  335 age_get_macaddr(struct age_softc *sc)
  336 {
  337         uint32_t ea[2], reg;
  338         int i, vpdc;
  339 
  340         reg = CSR_READ_4(sc, AGE_SPI_CTRL);
  341         if ((reg & SPI_VPD_ENB) != 0) {
  342                 /* Get VPD stored in TWSI EEPROM. */
  343                 reg &= ~SPI_VPD_ENB;
  344                 CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
  345         }
  346 
  347         if (pci_find_extcap(sc->age_dev, PCIY_VPD, &vpdc) == 0) {
  348                 /*
  349                  * PCI VPD capability found, let TWSI reload EEPROM.
  350                  * This will set the ethernet address of the controller.
  351                  */
  352                 CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
  353                     TWSI_CTRL_SW_LD_START);
  354                 for (i = 100; i > 0; i--) {
  355                         DELAY(1000);
  356                         reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
  357                         if ((reg & TWSI_CTRL_SW_LD_START) == 0)
  358                                 break;
  359                 }
  360                 if (i == 0)
  361                         device_printf(sc->age_dev,
  362                             "reloading EEPROM timeout!\n");
  363         } else {
  364                 if (bootverbose)
  365                         device_printf(sc->age_dev,
  366                             "PCI VPD capability not found!\n");
  367         }
  368 
  369         ea[0] = CSR_READ_4(sc, AGE_PAR0);
  370         ea[1] = CSR_READ_4(sc, AGE_PAR1);
  371         sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
  372         sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
  373         sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
  374         sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
  375         sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
  376         sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
  377 }
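      /*
       * The unpacking above follows the register layout: the station
       * address is kept big-endian across the two PAR registers.  For
       * the example MAC address 00:11:22:33:44:55 the hardware would
       * present
       *
       *      AGE_PAR1 = 0x00000011   (bytes 0-1 in the low 16 bits)
       *      AGE_PAR0 = 0x22334455   (bytes 2-5)
       *
       * and the shifts above recover age_eaddr[0..5] in order.
       */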
  378 
  379 static void
  380 age_phy_reset(struct age_softc *sc)
  381 {
  382         uint16_t reg, pn;
  383         int i, linkup;
  384 
  385         /* Reset PHY. */
  386         CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
  387         DELAY(2000);
  388         CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
  389         DELAY(2000);
  390 
  391 #define ATPHY_DBG_ADDR          0x1D
  392 #define ATPHY_DBG_DATA          0x1E
  393 #define ATPHY_CDTC              0x16
  394 #define PHY_CDTC_ENB            0x0001
  395 #define PHY_CDTC_POFF           8
  396 #define ATPHY_CDTS              0x1C
  397 #define PHY_CDTS_STAT_OK        0x0000
  398 #define PHY_CDTS_STAT_SHORT     0x0100
  399 #define PHY_CDTS_STAT_OPEN      0x0200
  400 #define PHY_CDTS_STAT_INVAL     0x0300
  401 #define PHY_CDTS_STAT_MASK      0x0300
  402 
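              /*
               * The defines above name what appear to be the Atheros PHY
               * debug port and per-pair cable diagnostic registers; the
               * names follow the Linux driver's use of the same magic.
               * Each pass of the loop below runs one diagnostic: write
               * the pair number plus PHY_CDTC_ENB to ATPHY_CDTC, poll
               * until the enable bit self-clears, then read the result
               * from ATPHY_CDTS.  Any status other than "open" is taken
               * as evidence of an attached link partner.
               */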
  403         /* Check power saving mode. Magic from Linux. */
  404         age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
  405         for (linkup = 0, pn = 0; pn < 4; pn++) {
  406                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_CDTC,
  407                     (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
  408                 for (i = 200; i > 0; i--) {
  409                         DELAY(1000);
  410                         reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
  411                             ATPHY_CDTC);
  412                         if ((reg & PHY_CDTC_ENB) == 0)
  413                                 break;
  414                 }
  415                 DELAY(1000);
  416                 reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
  417                     ATPHY_CDTS);
  418                 if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
  419                         linkup++;
  420                         break;
  421                 }
  422         }
  423         age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR,
  424             BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
  425         if (linkup == 0) {
  426                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
  427                     ATPHY_DBG_ADDR, 0);
  428                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
  429                     ATPHY_DBG_DATA, 0x124E);
  430                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
  431                     ATPHY_DBG_ADDR, 1);
  432                 reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
  433                     ATPHY_DBG_DATA);
  434                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
  435                     ATPHY_DBG_DATA, reg | 0x03);
  436                 /* XXX */
  437                 DELAY(1500 * 1000);
  438                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
  439                     ATPHY_DBG_ADDR, 0);
  440                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
  441                     ATPHY_DBG_DATA, 0x024E);
  442         }
  443 
  444 #undef  ATPHY_DBG_ADDR
  445 #undef  ATPHY_DBG_DATA
  446 #undef  ATPHY_CDTC
  447 #undef  PHY_CDTC_ENB
  448 #undef  PHY_CDTC_POFF
  449 #undef  ATPHY_CDTS
  450 #undef  PHY_CDTS_STAT_OK
  451 #undef  PHY_CDTS_STAT_SHORT
  452 #undef  PHY_CDTS_STAT_OPEN
  453 #undef  PHY_CDTS_STAT_INVAL
  454 #undef  PHY_CDTS_STAT_MASK
  455 }
  456 
  457 static int
  458 age_attach(device_t dev)
  459 {
  460         struct age_softc *sc;
  461         struct ifnet *ifp;
  462         uint16_t burst;
  463         int error, i, msic, msixc, pmc;
  464 
  465         error = 0;
  466         sc = device_get_softc(dev);
  467         sc->age_dev = dev;
  468 
  469         mtx_init(&sc->age_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  470             MTX_DEF);
  471         callout_init_mtx(&sc->age_tick_ch, &sc->age_mtx, 0);
  472         TASK_INIT(&sc->age_int_task, 0, age_int_task, sc);
  473         TASK_INIT(&sc->age_link_task, 0, age_link_task, sc);
  474 
  475         /* Map the device. */
  476         pci_enable_busmaster(dev);
  477         sc->age_res_spec = age_res_spec_mem;
  478         sc->age_irq_spec = age_irq_spec_legacy;
  479         error = bus_alloc_resources(dev, sc->age_res_spec, sc->age_res);
  480         if (error != 0) {
  481                 device_printf(dev, "cannot allocate memory resources.\n");
  482                 goto fail;
  483         }
  484 
  485         /* Set PHY address. */
  486         sc->age_phyaddr = AGE_PHY_ADDR;
  487 
  488         /* Reset PHY. */
  489         age_phy_reset(sc);
  490 
  491         /* Reset the ethernet controller. */
  492         age_reset(sc);
  493 
  494         /* Get PCI and chip id/revision. */
  495         sc->age_rev = pci_get_revid(dev);
  496         sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
  497             MASTER_CHIP_REV_SHIFT;
  498         if (bootverbose) {
  499                 device_printf(dev, "PCI device revision : 0x%04x\n",
  500                     sc->age_rev);
  501                 device_printf(dev, "Chip id/revision : 0x%04x\n",
  502                     sc->age_chip_rev);
  503         }
  504 
  505         /*
  506          * XXX
  507          * Uninitialized hardware returns an invalid chip id/revision
  508          * as well as 0xFFFFFFFF for the Tx/Rx FIFO lengths. It seems
  509          * that an unplugged cable puts the hardware into an automatic
  510          * power-down mode, which in turn returns an invalid chip revision.
  511          */
  512         if (sc->age_chip_rev == 0xFFFF) {
  513                 device_printf(dev, "invalid chip revision : 0x%04x -- "
  514                     "not initialized?\n", sc->age_chip_rev);
  515                 error = ENXIO;
  516                 goto fail;
  517         }
  518 
  519         device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
  520             CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
  521             CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
  522 
  523         /* Allocate IRQ resources. */
  524         msixc = pci_msix_count(dev);
  525         msic = pci_msi_count(dev);
  526         if (bootverbose) {
  527                 device_printf(dev, "MSIX count : %d\n", msixc);
  528                 device_printf(dev, "MSI count : %d\n", msic);
  529         }
  530 
  531         /* Prefer MSIX over MSI. */
  532         if (msix_disable == 0 || msi_disable == 0) {
  533                 if (msix_disable == 0 && msixc == AGE_MSIX_MESSAGES &&
  534                     pci_alloc_msix(dev, &msixc) == 0) {
  535                         if (msixc == AGE_MSIX_MESSAGES) {
  536                                 device_printf(dev, "Using %d MSIX messages.\n",
  537                                     msixc);
  538                                 sc->age_flags |= AGE_FLAG_MSIX;
  539                                 sc->age_irq_spec = age_irq_spec_msix;
  540                         } else
  541                                 pci_release_msi(dev);
  542                 }
  543                 if (msi_disable == 0 && (sc->age_flags & AGE_FLAG_MSIX) == 0 &&
  544                     msic == AGE_MSI_MESSAGES &&
  545                     pci_alloc_msi(dev, &msic) == 0) {
  546                         if (msic == AGE_MSI_MESSAGES) {
  547                                 device_printf(dev, "Using %d MSI messages.\n",
  548                                     msic);
  549                                 sc->age_flags |= AGE_FLAG_MSI;
  550                                 sc->age_irq_spec = age_irq_spec_msi;
  551                         } else
  552                                 pci_release_msi(dev);
  553                 }
  554         }
  555 
  556         error = bus_alloc_resources(dev, sc->age_irq_spec, sc->age_irq);
  557         if (error != 0) {
  558                 device_printf(dev, "cannot allocate IRQ resources.\n");
  559                 goto fail;
  560         }
  561 
  562 
  563         /* Get DMA parameters from PCIe device control register. */
  564         if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
  565                 sc->age_flags |= AGE_FLAG_PCIE;
  566                 burst = pci_read_config(dev, i + 0x08, 2);
  567                 /* Max read request size. */
  568                 sc->age_dma_rd_burst = ((burst >> 12) & 0x07) <<
  569                     DMA_CFG_RD_BURST_SHIFT;
  570                 /* Max payload size. */
  571                 sc->age_dma_wr_burst = ((burst >> 5) & 0x07) <<
  572                     DMA_CFG_WR_BURST_SHIFT;
  573                 if (bootverbose) {
  574                         device_printf(dev, "Read request size : %d bytes.\n",
  575                             128 << ((burst >> 12) & 0x07));
  576                         device_printf(dev, "TLP payload size : %d bytes.\n",
  577                             128 << ((burst >> 5) & 0x07));
  578                 }
  579         } else {
  580                 sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
  581                 sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
  582         }
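              /*
               * The PCIe device control register encodes both limits as
               * 128 << field, with the max payload size in bits 7:5 and
               * the max read request size in bits 14:12.  For a register
               * value of 0x2810, for example:
               *
               *      (0x2810 >> 12) & 0x07 = 2  ->  512 byte read requests
               *      (0x2810 >> 5) & 0x07  = 0  ->  128 byte payloads
               */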
  583 
  584         /* Create device sysctl node. */
  585         age_sysctl_node(sc);
  586 
  587         if ((error = age_dma_alloc(sc)) != 0)
  588                 goto fail;
  589 
  590         /* Load station address. */
  591         age_get_macaddr(sc);
  592 
  593         ifp = sc->age_ifp = if_alloc(IFT_ETHER);
  594         if (ifp == NULL) {
  595                 device_printf(dev, "cannot allocate ifnet structure.\n");
  596                 error = ENXIO;
  597                 goto fail;
  598         }
  599 
  600         ifp->if_softc = sc;
  601         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  602         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  603         ifp->if_ioctl = age_ioctl;
  604         ifp->if_start = age_start;
  605         ifp->if_init = age_init;
  606         ifp->if_snd.ifq_drv_maxlen = AGE_TX_RING_CNT - 1;
  607         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
  608         IFQ_SET_READY(&ifp->if_snd);
  609         ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
  610         ifp->if_hwassist = AGE_CSUM_FEATURES | CSUM_TSO;
  611         if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
  612                 sc->age_flags |= AGE_FLAG_PMCAP;
  613                 ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
  614         }
  615         ifp->if_capenable = ifp->if_capabilities;
  616 
  617         /* Set up MII bus. */
  618         error = mii_attach(dev, &sc->age_miibus, ifp, age_mediachange,
  619             age_mediastatus, BMSR_DEFCAPMASK, sc->age_phyaddr, MII_OFFSET_ANY,
  620             0);
  621         if (error != 0) {
  622                 device_printf(dev, "attaching PHYs failed\n");
  623                 goto fail;
  624         }
  625 
  626         ether_ifattach(ifp, sc->age_eaddr);
  627 
  628         /* VLAN capability setup. */
  629         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
  630             IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
  631         ifp->if_capenable = ifp->if_capabilities;
  632 
  633         /* Tell the upper layer(s) we support long frames. */
  634         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  635 
  636         /* Create local taskq. */
  637         sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK,
  638             taskqueue_thread_enqueue, &sc->age_tq);
  639         if (sc->age_tq == NULL) {
  640                 device_printf(dev, "could not create taskqueue.\n");
  641                 ether_ifdetach(ifp);
  642                 error = ENXIO;
  643                 goto fail;
  644         }
  645         taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq",
  646             device_get_nameunit(sc->age_dev));
  647 
  648         if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
  649                 msic = AGE_MSIX_MESSAGES;
  650         else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
  651                 msic = AGE_MSI_MESSAGES;
  652         else
  653                 msic = 1;
  654         for (i = 0; i < msic; i++) {
  655                 error = bus_setup_intr(dev, sc->age_irq[i],
  656                     INTR_TYPE_NET | INTR_MPSAFE, age_intr, NULL, sc,
  657                     &sc->age_intrhand[i]);
  658                 if (error != 0)
  659                         break;
  660         }
  661         if (error != 0) {
  662                 device_printf(dev, "could not set up interrupt handler.\n");
  663                 taskqueue_free(sc->age_tq);
  664                 sc->age_tq = NULL;
  665                 ether_ifdetach(ifp);
  666                 goto fail;
  667         }
  668 
  669 fail:
  670         if (error != 0)
  671                 age_detach(dev);
  672 
  673         return (error);
  674 }
  675 
  676 static int
  677 age_detach(device_t dev)
  678 {
  679         struct age_softc *sc;
  680         struct ifnet *ifp;
  681         int i, msic;
  682 
  683         sc = device_get_softc(dev);
  684 
  685         ifp = sc->age_ifp;
  686         if (device_is_attached(dev)) {
  687                 AGE_LOCK(sc);
  688                 sc->age_flags |= AGE_FLAG_DETACH;
  689                 age_stop(sc);
  690                 AGE_UNLOCK(sc);
  691                 callout_drain(&sc->age_tick_ch);
  692                 taskqueue_drain(sc->age_tq, &sc->age_int_task);
  693                 taskqueue_drain(taskqueue_swi, &sc->age_link_task);
  694                 ether_ifdetach(ifp);
  695         }
  696 
  697         if (sc->age_tq != NULL) {
  698                 taskqueue_drain(sc->age_tq, &sc->age_int_task);
  699                 taskqueue_free(sc->age_tq);
  700                 sc->age_tq = NULL;
  701         }
  702 
  703         if (sc->age_miibus != NULL) {
  704                 device_delete_child(dev, sc->age_miibus);
  705                 sc->age_miibus = NULL;
  706         }
  707         bus_generic_detach(dev);
  708         age_dma_free(sc);
  709 
  710         if (ifp != NULL) {
  711                 if_free(ifp);
  712                 sc->age_ifp = NULL;
  713         }
  714 
  715         if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
  716                 msic = AGE_MSIX_MESSAGES;
  717         else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
  718                 msic = AGE_MSI_MESSAGES;
  719         else
  720                 msic = 1;
  721         for (i = 0; i < msic; i++) {
  722                 if (sc->age_intrhand[i] != NULL) {
  723                         bus_teardown_intr(dev, sc->age_irq[i],
  724                             sc->age_intrhand[i]);
  725                         sc->age_intrhand[i] = NULL;
  726                 }
  727         }
  728 
  729         bus_release_resources(dev, sc->age_irq_spec, sc->age_irq);
  730         if ((sc->age_flags & (AGE_FLAG_MSI | AGE_FLAG_MSIX)) != 0)
  731                 pci_release_msi(dev);
  732         bus_release_resources(dev, sc->age_res_spec, sc->age_res);
  733         mtx_destroy(&sc->age_mtx);
  734 
  735         return (0);
  736 }
  737 
  738 static void
  739 age_sysctl_node(struct age_softc *sc)
  740 {
  741         int error;
  742 
  743         SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
  744             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
  745             "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_age_stats,
  746             "I", "Statistics");
  747 
  748         SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
  749             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
  750             "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->age_int_mod, 0,
  751             sysctl_hw_age_int_mod, "I", "age interrupt moderation");
  752 
  753         /* Pull in device tunables. */
  754         sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
  755         error = resource_int_value(device_get_name(sc->age_dev),
  756             device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
  757         if (error == 0) {
  758                 if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
  759                     sc->age_int_mod > AGE_IM_TIMER_MAX) {
  760                         device_printf(sc->age_dev,
  761                             "int_mod value out of range; using default: %d\n",
  762                             AGE_IM_TIMER_DEFAULT);
  763                         sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
  764                 }
  765         }
  766 
  767         SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
  768             SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
  769             "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->age_process_limit,
  770             0, sysctl_hw_age_proc_limit, "I",
  771             "max number of Rx events to process");
  772 
  773         /* Pull in device tunables. */
  774         sc->age_process_limit = AGE_PROC_DEFAULT;
  775         error = resource_int_value(device_get_name(sc->age_dev),
  776             device_get_unit(sc->age_dev), "process_limit",
  777             &sc->age_process_limit);
  778         if (error == 0) {
  779                 if (sc->age_process_limit < AGE_PROC_MIN ||
  780                     sc->age_process_limit > AGE_PROC_MAX) {
  781                         device_printf(sc->age_dev,
  782                             "process_limit value out of range; "
  783                             "using default: %d\n", AGE_PROC_DEFAULT);
  784                         sc->age_process_limit = AGE_PROC_DEFAULT;
  785                 }
  786         }
  787 }
  788 
  789 struct age_dmamap_arg {
  790         bus_addr_t      age_busaddr;
  791 };
  792 
  793 static void
  794 age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  795 {
  796         struct age_dmamap_arg *ctx;
  797 
  798         if (error != 0)
  799                 return;
  800 
  801         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
  802 
  803         ctx = (struct age_dmamap_arg *)arg;
  804         ctx->age_busaddr = segs[0].ds_addr;
  805 }
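      /*
       * Every ring/block tag created in age_dma_alloc() below uses
       * nsegments == 1 and memory obtained from bus_dmamem_alloc(), so
       * each bus_dmamap_load() is expected to complete synchronously:
       * age_dmamap_cb() runs before the load returns, leaving the
       * single segment's bus address in the caller's age_dmamap_arg.
       * The "ctx.age_busaddr == 0" checks after each load rely on this.
       */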
  806 
  807 /*
  808  * The Attansic L1 controller has a single register to specify the
  809  * high address part of DMA blocks, so all descriptor structures and
  810  * DMA memory blocks must share the same high address of a given
  811  * 4GB address space (i.e. crossing a 4GB boundary is not allowed).
  812  */
  813 static int
  814 age_check_boundary(struct age_softc *sc)
  815 {
  816         bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
  817         bus_addr_t cmb_block_end, smb_block_end;
  818 
  819         /* Tx/Rx descriptor queue should reside within 4GB boundary. */
  820         tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ;
  821         rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ;
  822         rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ;
  823         cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ;
  824         smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ;
  825 
  826         if ((AGE_ADDR_HI(tx_ring_end) !=
  827             AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) ||
  828             (AGE_ADDR_HI(rx_ring_end) !=
  829             AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) ||
  830             (AGE_ADDR_HI(rr_ring_end) !=
  831             AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) ||
  832             (AGE_ADDR_HI(cmb_block_end) !=
  833             AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) ||
  834             (AGE_ADDR_HI(smb_block_end) !=
  835             AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr)))
  836                 return (EFBIG);
  837 
  838         if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) ||
  839             (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) ||
  840             (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) ||
  841             (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end)))
  842                 return (EFBIG);
  843 
  844         return (0);
  845 }
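      /*
       * A worked example of the test above, assuming AGE_ADDR_HI()
       * extracts bits 63:32 of a bus address: a 0x4000-byte ring loaded
       * at bus address 0xFFFFF000 would end at 0x1_00003000, so
       * AGE_ADDR_HI() of start (0x0) and end (0x1) differ, EFBIG is
       * returned, and age_dma_alloc() retries with 32-bit addressing.
       */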
  846 
  847 static int
  848 age_dma_alloc(struct age_softc *sc)
  849 {
  850         struct age_txdesc *txd;
  851         struct age_rxdesc *rxd;
  852         bus_addr_t lowaddr;
  853         struct age_dmamap_arg ctx;
  854         int error, i;
  855 
  856         lowaddr = BUS_SPACE_MAXADDR;
  857 
  858 again:
  859         /* Create parent ring/DMA block tag. */
  860         error = bus_dma_tag_create(
  861             bus_get_dma_tag(sc->age_dev), /* parent */
  862             1, 0,                       /* alignment, boundary */
  863             lowaddr,                    /* lowaddr */
  864             BUS_SPACE_MAXADDR,          /* highaddr */
  865             NULL, NULL,                 /* filter, filterarg */
  866             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
  867             0,                          /* nsegments */
  868             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
  869             0,                          /* flags */
  870             NULL, NULL,                 /* lockfunc, lockarg */
  871             &sc->age_cdata.age_parent_tag);
  872         if (error != 0) {
  873                 device_printf(sc->age_dev,
  874                     "could not create parent DMA tag.\n");
  875                 goto fail;
  876         }
  877 
  878         /* Create tag for Tx ring. */
  879         error = bus_dma_tag_create(
  880             sc->age_cdata.age_parent_tag, /* parent */
  881             AGE_TX_RING_ALIGN, 0,       /* alignment, boundary */
  882             BUS_SPACE_MAXADDR,          /* lowaddr */
  883             BUS_SPACE_MAXADDR,          /* highaddr */
  884             NULL, NULL,                 /* filter, filterarg */
  885             AGE_TX_RING_SZ,             /* maxsize */
  886             1,                          /* nsegments */
  887             AGE_TX_RING_SZ,             /* maxsegsize */
  888             0,                          /* flags */
  889             NULL, NULL,                 /* lockfunc, lockarg */
  890             &sc->age_cdata.age_tx_ring_tag);
  891         if (error != 0) {
  892                 device_printf(sc->age_dev,
  893                     "could not create Tx ring DMA tag.\n");
  894                 goto fail;
  895         }
  896 
  897         /* Create tag for Rx ring. */
  898         error = bus_dma_tag_create(
  899             sc->age_cdata.age_parent_tag, /* parent */
  900             AGE_RX_RING_ALIGN, 0,       /* alignment, boundary */
  901             BUS_SPACE_MAXADDR,          /* lowaddr */
  902             BUS_SPACE_MAXADDR,          /* highaddr */
  903             NULL, NULL,                 /* filter, filterarg */
  904             AGE_RX_RING_SZ,             /* maxsize */
  905             1,                          /* nsegments */
  906             AGE_RX_RING_SZ,             /* maxsegsize */
  907             0,                          /* flags */
  908             NULL, NULL,                 /* lockfunc, lockarg */
  909             &sc->age_cdata.age_rx_ring_tag);
  910         if (error != 0) {
  911                 device_printf(sc->age_dev,
  912                     "could not create Rx ring DMA tag.\n");
  913                 goto fail;
  914         }
  915 
  916         /* Create tag for Rx return ring. */
  917         error = bus_dma_tag_create(
  918             sc->age_cdata.age_parent_tag, /* parent */
  919             AGE_RR_RING_ALIGN, 0,       /* alignment, boundary */
  920             BUS_SPACE_MAXADDR,          /* lowaddr */
  921             BUS_SPACE_MAXADDR,          /* highaddr */
  922             NULL, NULL,                 /* filter, filterarg */
  923             AGE_RR_RING_SZ,             /* maxsize */
  924             1,                          /* nsegments */
  925             AGE_RR_RING_SZ,             /* maxsegsize */
  926             0,                          /* flags */
  927             NULL, NULL,                 /* lockfunc, lockarg */
  928             &sc->age_cdata.age_rr_ring_tag);
  929         if (error != 0) {
  930                 device_printf(sc->age_dev,
  931                     "could not create Rx return ring DMA tag.\n");
  932                 goto fail;
  933         }
  934 
  935         /* Create tag for coalescing message block. */
  936         error = bus_dma_tag_create(
  937             sc->age_cdata.age_parent_tag, /* parent */
  938             AGE_CMB_ALIGN, 0,           /* alignment, boundary */
  939             BUS_SPACE_MAXADDR,          /* lowaddr */
  940             BUS_SPACE_MAXADDR,          /* highaddr */
  941             NULL, NULL,                 /* filter, filterarg */
  942             AGE_CMB_BLOCK_SZ,           /* maxsize */
  943             1,                          /* nsegments */
  944             AGE_CMB_BLOCK_SZ,           /* maxsegsize */
  945             0,                          /* flags */
  946             NULL, NULL,                 /* lockfunc, lockarg */
  947             &sc->age_cdata.age_cmb_block_tag);
  948         if (error != 0) {
  949                 device_printf(sc->age_dev,
  950                     "could not create CMB DMA tag.\n");
  951                 goto fail;
  952         }
  953 
  954         /* Create tag for statistics message block. */
  955         error = bus_dma_tag_create(
  956             sc->age_cdata.age_parent_tag, /* parent */
  957             AGE_SMB_ALIGN, 0,           /* alignment, boundary */
  958             BUS_SPACE_MAXADDR,          /* lowaddr */
  959             BUS_SPACE_MAXADDR,          /* highaddr */
  960             NULL, NULL,                 /* filter, filterarg */
  961             AGE_SMB_BLOCK_SZ,           /* maxsize */
  962             1,                          /* nsegments */
  963             AGE_SMB_BLOCK_SZ,           /* maxsegsize */
  964             0,                          /* flags */
  965             NULL, NULL,                 /* lockfunc, lockarg */
  966             &sc->age_cdata.age_smb_block_tag);
  967         if (error != 0) {
  968                 device_printf(sc->age_dev,
  969                     "could not create SMB DMA tag.\n");
  970                 goto fail;
  971         }
  972 
  973         /* Allocate DMA'able memory and load the DMA map. */
  974         error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag,
  975             (void **)&sc->age_rdata.age_tx_ring,
  976             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
  977             &sc->age_cdata.age_tx_ring_map);
  978         if (error != 0) {
  979                 device_printf(sc->age_dev,
  980                     "could not allocate DMA'able memory for Tx ring.\n");
  981                 goto fail;
  982         }
  983         ctx.age_busaddr = 0;
  984         error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag,
  985             sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring,
  986             AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0);
  987         if (error != 0 || ctx.age_busaddr == 0) {
  988                 device_printf(sc->age_dev,
  989                     "could not load DMA'able memory for Tx ring.\n");
  990                 goto fail;
  991         }
  992         sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr;
  993         /* Rx ring */
  994         error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag,
  995             (void **)&sc->age_rdata.age_rx_ring,
  996             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
  997             &sc->age_cdata.age_rx_ring_map);
  998         if (error != 0) {
  999                 device_printf(sc->age_dev,
 1000                     "could not allocate DMA'able memory for Rx ring.\n");
 1001                 goto fail;
 1002         }
 1003         ctx.age_busaddr = 0;
 1004         error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag,
 1005             sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring,
 1006             AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0);
 1007         if (error != 0 || ctx.age_busaddr == 0) {
 1008                 device_printf(sc->age_dev,
 1009                     "could not load DMA'able memory for Rx ring.\n");
 1010                 goto fail;
 1011         }
 1012         sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr;
 1013         /* Rx return ring */
 1014         error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag,
 1015             (void **)&sc->age_rdata.age_rr_ring,
 1016             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1017             &sc->age_cdata.age_rr_ring_map);
 1018         if (error != 0) {
 1019                 device_printf(sc->age_dev,
 1020                     "could not allocate DMA'able memory for Rx return ring.\n");
 1021                 goto fail;
 1022         }
 1023         ctx.age_busaddr = 0;
 1024         error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag,
 1025             sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring,
 1026             AGE_RR_RING_SZ, age_dmamap_cb,
 1027             &ctx, 0);
 1028         if (error != 0 || ctx.age_busaddr == 0) {
 1029                 device_printf(sc->age_dev,
 1030                     "could not load DMA'able memory for Rx return ring.\n");
 1031                 goto fail;
 1032         }
 1033         sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr;
 1034         /* CMB block */
 1035         error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag,
 1036             (void **)&sc->age_rdata.age_cmb_block,
 1037             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1038             &sc->age_cdata.age_cmb_block_map);
 1039         if (error != 0) {
 1040                 device_printf(sc->age_dev,
 1041                     "could not allocate DMA'able memory for CMB block.\n");
 1042                 goto fail;
 1043         }
 1044         ctx.age_busaddr = 0;
 1045         error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag,
 1046             sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block,
 1047             AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
 1048         if (error != 0 || ctx.age_busaddr == 0) {
 1049                 device_printf(sc->age_dev,
 1050                     "could not load DMA'able memory for CMB block.\n");
 1051                 goto fail;
 1052         }
 1053         sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr;
 1054         /* SMB block */
 1055         error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag,
 1056             (void **)&sc->age_rdata.age_smb_block,
 1057             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1058             &sc->age_cdata.age_smb_block_map);
 1059         if (error != 0) {
 1060                 device_printf(sc->age_dev,
 1061                     "could not allocate DMA'able memory for SMB block.\n");
 1062                 goto fail;
 1063         }
 1064         ctx.age_busaddr = 0;
 1065         error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag,
 1066             sc->age_cdata.age_smb_block_map, sc->age_rdata.age_smb_block,
 1067             AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
 1068         if (error != 0 || ctx.age_busaddr == 0) {
 1069                 device_printf(sc->age_dev,
 1070                     "could not load DMA'able memory for SMB block.\n");
 1071                 goto fail;
 1072         }
 1073         sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr;
 1074 
 1075         /*
 1076          * All ring buffers and DMA blocks must share the same
 1077          * high address part of the 64-bit DMA address space.
 1078          */
 1079         if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
 1080             (error = age_check_boundary(sc)) != 0) {
 1081                 device_printf(sc->age_dev, "4GB boundary crossed, "
 1082                     "switching to 32bit DMA addressing mode.\n");
 1083                 age_dma_free(sc);
 1084                 /* Limit DMA address space to 32bit and try again. */
 1085                 lowaddr = BUS_SPACE_MAXADDR_32BIT;
 1086                 goto again;
 1087         }
 1088 
 1089         /*
 1090          * Create Tx/Rx buffer parent tag.
 1091          * The L1 supports full 64-bit DMA addressing for Tx/Rx buffers,
 1092          * so it needs a separate parent DMA tag.
 1093          * XXX
 1094          * It seems enabling 64-bit DMA causes data corruption. Limit
 1095          * the DMA address space to 32 bits.
 1096          */
 1097         error = bus_dma_tag_create(
 1098             bus_get_dma_tag(sc->age_dev), /* parent */
 1099             1, 0,                       /* alignment, boundary */
 1100             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 1101             BUS_SPACE_MAXADDR,          /* highaddr */
 1102             NULL, NULL,                 /* filter, filterarg */
 1103             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 1104             0,                          /* nsegments */
 1105             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1106             0,                          /* flags */
 1107             NULL, NULL,                 /* lockfunc, lockarg */
 1108             &sc->age_cdata.age_buffer_tag);
 1109         if (error != 0) {
 1110                 device_printf(sc->age_dev,
 1111                     "could not create parent buffer DMA tag.\n");
 1112                 goto fail;
 1113         }
 1114 
 1115         /* Create tag for Tx buffers. */
 1116         error = bus_dma_tag_create(
 1117             sc->age_cdata.age_buffer_tag, /* parent */
 1118             1, 0,                       /* alignment, boundary */
 1119             BUS_SPACE_MAXADDR,          /* lowaddr */
 1120             BUS_SPACE_MAXADDR,          /* highaddr */
 1121             NULL, NULL,                 /* filter, filterarg */
 1122             AGE_TSO_MAXSIZE,            /* maxsize */
 1123             AGE_MAXTXSEGS,              /* nsegments */
 1124             AGE_TSO_MAXSEGSIZE,         /* maxsegsize */
 1125             0,                          /* flags */
 1126             NULL, NULL,                 /* lockfunc, lockarg */
 1127             &sc->age_cdata.age_tx_tag);
 1128         if (error != 0) {
 1129                 device_printf(sc->age_dev, "could not create Tx DMA tag.\n");
 1130                 goto fail;
 1131         }
 1132 
 1133         /* Create tag for Rx buffers. */
 1134         error = bus_dma_tag_create(
 1135             sc->age_cdata.age_buffer_tag, /* parent */
 1136             1, 0,                       /* alignment, boundary */
 1137             BUS_SPACE_MAXADDR,          /* lowaddr */
 1138             BUS_SPACE_MAXADDR,          /* highaddr */
 1139             NULL, NULL,                 /* filter, filterarg */
 1140             MCLBYTES,                   /* maxsize */
 1141             1,                          /* nsegments */
 1142             MCLBYTES,                   /* maxsegsize */
 1143             0,                          /* flags */
 1144             NULL, NULL,                 /* lockfunc, lockarg */
 1145             &sc->age_cdata.age_rx_tag);
 1146         if (error != 0) {
 1147                 device_printf(sc->age_dev, "could not create Rx DMA tag.\n");
 1148                 goto fail;
 1149         }
 1150 
 1151         /* Create DMA maps for Tx buffers. */
 1152         for (i = 0; i < AGE_TX_RING_CNT; i++) {
 1153                 txd = &sc->age_cdata.age_txdesc[i];
 1154                 txd->tx_m = NULL;
 1155                 txd->tx_dmamap = NULL;
 1156                 error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0,
 1157                     &txd->tx_dmamap);
 1158                 if (error != 0) {
 1159                         device_printf(sc->age_dev,
 1160                             "could not create Tx dmamap.\n");
 1161                         goto fail;
 1162                 }
 1163         }
 1164         /* Create DMA maps for Rx buffers. */
 1165         if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
 1166             &sc->age_cdata.age_rx_sparemap)) != 0) {
 1167                 device_printf(sc->age_dev,
 1168                     "could not create spare Rx dmamap.\n");
 1169                 goto fail;
 1170         }
 1171         for (i = 0; i < AGE_RX_RING_CNT; i++) {
 1172                 rxd = &sc->age_cdata.age_rxdesc[i];
 1173                 rxd->rx_m = NULL;
 1174                 rxd->rx_dmamap = NULL;
 1175                 error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
 1176                     &rxd->rx_dmamap);
 1177                 if (error != 0) {
 1178                         device_printf(sc->age_dev,
 1179                             "could not create Rx dmamap.\n");
 1180                         goto fail;
 1181                 }
 1182         }
 1183 
 1184 fail:
 1185         return (error);
 1186 }
 1187 
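      /*
       * Teardown mirrors allocation in reverse: each map is unloaded
       * before its memory is freed, and every child tag is destroyed
       * before the parent tags at the bottom, as bus_dma(9) requires.
       */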
 1188 static void
 1189 age_dma_free(struct age_softc *sc)
 1190 {
 1191         struct age_txdesc *txd;
 1192         struct age_rxdesc *rxd;
 1193         int i;
 1194 
 1195         /* Tx buffers */
 1196         if (sc->age_cdata.age_tx_tag != NULL) {
 1197                 for (i = 0; i < AGE_TX_RING_CNT; i++) {
 1198                         txd = &sc->age_cdata.age_txdesc[i];
 1199                         if (txd->tx_dmamap != NULL) {
 1200                                 bus_dmamap_destroy(sc->age_cdata.age_tx_tag,
 1201                                     txd->tx_dmamap);
 1202                                 txd->tx_dmamap = NULL;
 1203                         }
 1204                 }
 1205                 bus_dma_tag_destroy(sc->age_cdata.age_tx_tag);
 1206                 sc->age_cdata.age_tx_tag = NULL;
 1207         }
 1208         /* Rx buffers */
 1209         if (sc->age_cdata.age_rx_tag != NULL) {
 1210                 for (i = 0; i < AGE_RX_RING_CNT; i++) {
 1211                         rxd = &sc->age_cdata.age_rxdesc[i];
 1212                         if (rxd->rx_dmamap != NULL) {
 1213                                 bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
 1214                                     rxd->rx_dmamap);
 1215                                 rxd->rx_dmamap = NULL;
 1216                         }
 1217                 }
 1218                 if (sc->age_cdata.age_rx_sparemap != NULL) {
 1219                         bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
 1220                             sc->age_cdata.age_rx_sparemap);
 1221                         sc->age_cdata.age_rx_sparemap = NULL;
 1222                 }
 1223                 bus_dma_tag_destroy(sc->age_cdata.age_rx_tag);
 1224                 sc->age_cdata.age_rx_tag = NULL;
 1225         }
 1226         /* Tx ring. */
 1227         if (sc->age_cdata.age_tx_ring_tag != NULL) {
 1228                 if (sc->age_cdata.age_tx_ring_map != NULL)
 1229                         bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag,
 1230                             sc->age_cdata.age_tx_ring_map);
 1231                 if (sc->age_cdata.age_tx_ring_map != NULL &&
 1232                     sc->age_rdata.age_tx_ring != NULL)
 1233                         bus_dmamem_free(sc->age_cdata.age_tx_ring_tag,
 1234                             sc->age_rdata.age_tx_ring,
 1235                             sc->age_cdata.age_tx_ring_map);
 1236                 sc->age_rdata.age_tx_ring = NULL;
 1237                 sc->age_cdata.age_tx_ring_map = NULL;
 1238                 bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag);
 1239                 sc->age_cdata.age_tx_ring_tag = NULL;
 1240         }
 1241         /* Rx ring. */
 1242         if (sc->age_cdata.age_rx_ring_tag != NULL) {
 1243                 if (sc->age_cdata.age_rx_ring_map != NULL)
 1244                         bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag,
 1245                             sc->age_cdata.age_rx_ring_map);
 1246                 if (sc->age_cdata.age_rx_ring_map != NULL &&
 1247                     sc->age_rdata.age_rx_ring != NULL)
 1248                         bus_dmamem_free(sc->age_cdata.age_rx_ring_tag,
 1249                             sc->age_rdata.age_rx_ring,
 1250                             sc->age_cdata.age_rx_ring_map);
 1251                 sc->age_rdata.age_rx_ring = NULL;
 1252                 sc->age_cdata.age_rx_ring_map = NULL;
 1253                 bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag);
 1254                 sc->age_cdata.age_rx_ring_tag = NULL;
 1255         }
 1256         /* Rx return ring. */
 1257         if (sc->age_cdata.age_rr_ring_tag != NULL) {
 1258                 if (sc->age_cdata.age_rr_ring_map != NULL)
 1259                         bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
 1260                             sc->age_cdata.age_rr_ring_map);
 1261                 if (sc->age_cdata.age_rr_ring_map != NULL &&
 1262                     sc->age_rdata.age_rr_ring != NULL)
 1263                         bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
 1264                             sc->age_rdata.age_rr_ring,
 1265                             sc->age_cdata.age_rr_ring_map);
 1266                 sc->age_rdata.age_rr_ring = NULL;
 1267                 sc->age_cdata.age_rr_ring_map = NULL;
 1268                 bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
 1269                 sc->age_cdata.age_rr_ring_tag = NULL;
 1270         }
 1271         /* CMB block */
 1272         if (sc->age_cdata.age_cmb_block_tag != NULL) {
 1273                 if (sc->age_cdata.age_cmb_block_map != NULL)
 1274                         bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
 1275                             sc->age_cdata.age_cmb_block_map);
 1276                 if (sc->age_cdata.age_cmb_block_map != NULL &&
 1277                     sc->age_rdata.age_cmb_block != NULL)
 1278                         bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
 1279                             sc->age_rdata.age_cmb_block,
 1280                             sc->age_cdata.age_cmb_block_map);
 1281                 sc->age_rdata.age_cmb_block = NULL;
 1282                 sc->age_cdata.age_cmb_block_map = NULL;
 1283                 bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
 1284                 sc->age_cdata.age_cmb_block_tag = NULL;
 1285         }
 1286         /* SMB block */
 1287         if (sc->age_cdata.age_smb_block_tag != NULL) {
 1288                 if (sc->age_cdata.age_smb_block_map != NULL)
 1289                         bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
 1290                             sc->age_cdata.age_smb_block_map);
 1291                 if (sc->age_cdata.age_smb_block_map != NULL &&
 1292                     sc->age_rdata.age_smb_block != NULL)
 1293                         bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
 1294                             sc->age_rdata.age_smb_block,
 1295                             sc->age_cdata.age_smb_block_map);
 1296                 sc->age_rdata.age_smb_block = NULL;
 1297                 sc->age_cdata.age_smb_block_map = NULL;
 1298                 bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
 1299                 sc->age_cdata.age_smb_block_tag = NULL;
 1300         }
 1301 
 1302         if (sc->age_cdata.age_buffer_tag != NULL) {
 1303                 bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
 1304                 sc->age_cdata.age_buffer_tag = NULL;
 1305         }
 1306         if (sc->age_cdata.age_parent_tag != NULL) {
 1307                 bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
 1308                 sc->age_cdata.age_parent_tag = NULL;
 1309         }
 1310 }
 1311 
 1312 /*
 1313  *      Make sure the interface is stopped at reboot time.
 1314  */
 1315 static int
 1316 age_shutdown(device_t dev)
 1317 {
 1318 
 1319         return (age_suspend(dev));
 1320 }
 1321 
 1322 static void
 1323 age_setwol(struct age_softc *sc)
 1324 {
 1325         struct ifnet *ifp;
 1326         struct mii_data *mii;
 1327         uint32_t reg, pmcs;
 1328         uint16_t pmstat;
 1329         int aneg, i, pmc;
 1330 
 1331         AGE_LOCK_ASSERT(sc);
 1332 
 1333         if (pci_find_extcap(sc->age_dev, PCIY_PMG, &pmc) != 0) {
 1334                 CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
 1335                 /*
 1336                  * No PME capability, PHY power down.
 1337                  * XXX
 1338                  * For an unknown reason, powering down the PHY caused
 1339                  * unexpected results such as inaccessibility of the
 1340                  * hardware on a freshly rebooted system. Disable
 1341                  * powering down the PHY until I get more information
 1342                  * on Attansic/Atheros PHY hardware.
 1343                  */
 1344 #ifdef notyet
 1345                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
 1346                     MII_BMCR, BMCR_PDOWN);
 1347 #endif
 1348                 return;
 1349         }
 1350 
 1351         ifp = sc->age_ifp;
 1352         if ((ifp->if_capenable & IFCAP_WOL) != 0) {
 1353                 /*
 1354                  * Note, this driver resets the link speed to 10/100Mbps via
 1355                  * auto-negotiation, but we don't know whether that operation
 1356                  * will succeed as we have no control after powering off.
 1357                  * If the renegotiation fails, WOL may not work. Running
 1358                  * at 1Gbps would draw more power than the 375mA at 3.3V
 1359                  * allowed by the PCI specification, and that could result in
 1360                  * power to the ethernet controller being shut down completely.
 1361                  *
 1362                  * TODO
 1363                  *  Save current negotiated media speed/duplex/flow-control
 1364                  *  to softc and restore the same link again after resuming.
 1365                  *  PHY handling such as powering down/resetting to 100Mbps
 1366                  *  may be better handled in the suspend method of the PHY driver.
 1367                  */
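                      /*
                       * For reference, the PCI budget mentioned above works
                       * out to 375mA * 3.3V ~= 1.24W on the 3.3V rail, which
                       * a 1000baseT link can exceed; hence the fallback to
                       * 10/100Mbps below before arming WOL.
                       */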
 1368                 mii = device_get_softc(sc->age_miibus);
 1369                 mii_pollstat(mii);
 1370                 aneg = 0;
 1371                 if ((mii->mii_media_status & IFM_AVALID) != 0) {
 1372                         switch (IFM_SUBTYPE(mii->mii_media_active)) {
 1373                         case IFM_10_T:
 1374                         case IFM_100_TX:
 1375                                 goto got_link;
 1376                         case IFM_1000_T:
 1377                                 aneg++;
 1378                         default:
 1379                                 break;
 1380                         }
 1381                 }
 1382                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
 1383                     MII_100T2CR, 0);
 1384                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
 1385                     MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD |
 1386                     ANAR_10 | ANAR_CSMA);
 1387                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
 1388                     MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
 1389                 DELAY(1000);
 1390                 if (aneg != 0) {
 1391                         /* Poll link state until age(4) gets a 10/100 link. */
 1392                         for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
 1393                                 mii_pollstat(mii);
 1394                                 if ((mii->mii_media_status & IFM_AVALID) != 0) {
 1395                                         switch (IFM_SUBTYPE(
 1396                                             mii->mii_media_active)) {
 1397                                         case IFM_10_T:
 1398                                         case IFM_100_TX:
 1399                                                 age_mac_config(sc);
 1400                                                 goto got_link;
 1401                                         default:
 1402                                                 break;
 1403                                         }
 1404                                 }
 1405                                 AGE_UNLOCK(sc);
 1406                                 pause("agelnk", hz);
 1407                                 AGE_LOCK(sc);
 1408                         }
 1409                         if (i == MII_ANEGTICKS_GIGE)
 1410                                 device_printf(sc->age_dev,
 1411                                     "establishing link failed, "
 1412                                     "WOL may not work!\n");
 1413                 }
 1414                 /*
 1415                  * No link; force the MAC into a 100Mbps, full-duplex link.
 1416                  * This is a last resort and may or may not work.
 1417                  */
 1418                 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
 1419                 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
 1420                 age_mac_config(sc);
 1421         }
 1422 
 1423 got_link:
 1424         pmcs = 0;
 1425         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 1426                 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
 1427         CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs);
 1428         reg = CSR_READ_4(sc, AGE_MAC_CFG);
 1429         reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC);
 1430         reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST);
 1431         if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
 1432                 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
 1433         if ((ifp->if_capenable & IFCAP_WOL) != 0) {
 1434                 reg |= MAC_CFG_RX_ENB;
 1435                 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
 1436         }
 1437 
 1438         /* Request PME. */
 1439         pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2);
 1440         pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
 1441         if ((ifp->if_capenable & IFCAP_WOL) != 0)
 1442                 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
 1443         pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
 1444 #ifdef notyet
 1445         /* See above for powering down PHY issues. */
 1446         if ((ifp->if_capenable & IFCAP_WOL) == 0) {
 1447                 /* No WOL, PHY power down. */
 1448                 age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
 1449                     MII_BMCR, BMCR_PDOWN);
 1450         }
 1451 #endif
 1452 }
 1453 
 1454 static int
 1455 age_suspend(device_t dev)
 1456 {
 1457         struct age_softc *sc;
 1458 
 1459         sc = device_get_softc(dev);
 1460 
 1461         AGE_LOCK(sc);
 1462         age_stop(sc);
 1463         age_setwol(sc);
 1464         AGE_UNLOCK(sc);
 1465 
 1466         return (0);
 1467 }
 1468 
 1469 static int
 1470 age_resume(device_t dev)
 1471 {
 1472         struct age_softc *sc;
 1473         struct ifnet *ifp;
 1474 
 1475         sc = device_get_softc(dev);
 1476 
 1477         AGE_LOCK(sc);
 1478         age_phy_reset(sc);
 1479         ifp = sc->age_ifp;
 1480         if ((ifp->if_flags & IFF_UP) != 0)
 1481                 age_init_locked(sc);
 1482 
 1483         AGE_UNLOCK(sc);
 1484 
 1485         return (0);
 1486 }
 1487 
 1488 static int
 1489 age_encap(struct age_softc *sc, struct mbuf **m_head)
 1490 {
 1491         struct age_txdesc *txd, *txd_last;
 1492         struct tx_desc *desc;
 1493         struct mbuf *m;
 1494         struct ip *ip;
 1495         struct tcphdr *tcp;
 1496         bus_dma_segment_t txsegs[AGE_MAXTXSEGS];
 1497         bus_dmamap_t map;
 1498         uint32_t cflags, ip_off, poff, vtag;
 1499         int error, i, nsegs, prod, si;
 1500 
 1501         AGE_LOCK_ASSERT(sc);
 1502 
 1503         M_ASSERTPKTHDR((*m_head));
 1504 
 1505         m = *m_head;
 1506         ip = NULL;
 1507         tcp = NULL;
 1508         cflags = vtag = 0;
 1509         ip_off = poff = 0;
 1510         if ((m->m_pkthdr.csum_flags & (AGE_CSUM_FEATURES | CSUM_TSO)) != 0) {
 1511                 /*
 1512                  * The L1 requires the offset of the TCP/UDP payload in
 1513                  * its Tx descriptor to perform hardware Tx checksum
 1514                  * offload. Additionally, TSO requires the IP/TCP header
 1515                  * sizes and modification of the IP/TCP headers to make
 1516                  * the TSO engine work. These operations take many CPU
 1517                  * cycles on FreeBSD, so a fast host CPU is needed for
 1518                  * smooth TSO performance.
 1519                  */
 1520                 struct ether_header *eh;
 1521 
 1522                 if (M_WRITABLE(m) == 0) {
 1523                         /* Get a writable copy. */
 1524                         m = m_dup(*m_head, M_DONTWAIT);
 1525                         /* Release original mbufs. */
 1526                         m_freem(*m_head);
 1527                         if (m == NULL) {
 1528                                 *m_head = NULL;
 1529                                 return (ENOBUFS);
 1530                         }
 1531                         *m_head = m;
 1532                 }
 1533                 ip_off = sizeof(struct ether_header);
 1534                 m = m_pullup(m, ip_off);
 1535                 if (m == NULL) {
 1536                         *m_head = NULL;
 1537                         return (ENOBUFS);
 1538                 }
 1539                 eh = mtod(m, struct ether_header *);
 1540                 /*
 1541                  * Check if hardware VLAN insertion is off.
 1542                  * Additional check for LLC/SNAP frame?
 1543                  */
 1544                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 1545                         ip_off = sizeof(struct ether_vlan_header);
 1546                         m = m_pullup(m, ip_off);
 1547                         if (m == NULL) {
 1548                                 *m_head = NULL;
 1549                                 return (ENOBUFS);
 1550                         }
 1551                 }
 1552                 m = m_pullup(m, ip_off + sizeof(struct ip));
 1553                 if (m == NULL) {
 1554                         *m_head = NULL;
 1555                         return (ENOBUFS);
 1556                 }
 1557                 ip = (struct ip *)(mtod(m, char *) + ip_off);
 1558                 poff = ip_off + (ip->ip_hl << 2);
 1559                 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 1560                         m = m_pullup(m, poff + sizeof(struct tcphdr));
 1561                         if (m == NULL) {
 1562                                 *m_head = NULL;
 1563                                 return (ENOBUFS);
 1564                         }
 1565                         ip = (struct ip *)(mtod(m, char *) + ip_off);
 1566                         tcp = (struct tcphdr *)(mtod(m, char *) + poff);
 1567                         /*
 1568                          * L1 requires the IP/TCP header sizes and offsets as
 1569                          * well as the TCP pseudo checksum, which complicates
 1570                          * TSO configuration. I guess this comes from adherence
 1571                          * to the Microsoft NDIS Large Send specification,
 1572                          * which requires insertion of the pseudo checksum by
 1573                          * the upper stack. The pseudo checksum that NDIS
 1574                          * refers to doesn't include the TCP payload length,
 1575                          * so age(4) must recompute the pseudo checksum here.
 1576                          * Hopefully this isn't much of a burden on modern
 1577                          * CPUs.
 1578                          * Reset the IP checksum and recompute the TCP pseudo
 1579                          * checksum as the NDIS specification says.
 1580                          */
 1581                         ip->ip_sum = 0;
 1582                         if (poff + (tcp->th_off << 2) == m->m_pkthdr.len)
 1583                                 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
 1584                                     ip->ip_dst.s_addr,
 1585                                     htons((tcp->th_off << 2) + IPPROTO_TCP));
 1586                         else
 1587                                 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
 1588                                     ip->ip_dst.s_addr, htons(IPPROTO_TCP));
 1589                 }
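              /*
               * Sketch of the in_pseudo() calls above: in_pseudo(a, b, c)
               * folds the one's-complement sum of three 32-bit words, here
               * the source address, the destination address, and a word
               * carrying the protocol (plus the TCP header length in the
               * header-only case). The payload length is left out in the
               * TSO case, as NDIS requires; presumably the hardware adds
               * the per-segment length itself.
               */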
 1590                 *m_head = m;
 1591         }
 1592 
 1593         si = prod = sc->age_cdata.age_tx_prod;
 1594         txd = &sc->age_cdata.age_txdesc[prod];
 1595         txd_last = txd;
 1596         map = txd->tx_dmamap;
 1597 
 1598         error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
 1599             *m_head, txsegs, &nsegs, 0);
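      /*
       * An EFBIG return below means the mbuf chain maps to more than
       * AGE_MAXTXSEGS segments; m_collapse() tries to compact it into
       * at most AGE_MAXTXSEGS clusters, and the load is retried once.
       */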
 1600         if (error == EFBIG) {
 1601                 m = m_collapse(*m_head, M_DONTWAIT, AGE_MAXTXSEGS);
 1602                 if (m == NULL) {
 1603                         m_freem(*m_head);
 1604                         *m_head = NULL;
 1605                         return (ENOMEM);
 1606                 }
 1607                 *m_head = m;
 1608                 error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
 1609                     *m_head, txsegs, &nsegs, 0);
 1610                 if (error != 0) {
 1611                         m_freem(*m_head);
 1612                         *m_head = NULL;
 1613                         return (error);
 1614                 }
 1615         } else if (error != 0)
 1616                 return (error);
 1617         if (nsegs == 0) {
 1618                 m_freem(*m_head);
 1619                 *m_head = NULL;
 1620                 return (EIO);
 1621         }
 1622 
 1623         /* Check descriptor overrun. */
 1624         if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
 1625                 bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);
 1626                 return (ENOBUFS);
 1627         }
 1628 
 1629         m = *m_head;
 1630         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 1631                 /* Configure TSO. */
 1632                 if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
 1633                         /* Not TSO but IP/TCP checksum offload. */
 1634                         cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
 1635                         /* Clear TSO in order not to set AGE_TD_TSO_HDR. */
 1636                         m->m_pkthdr.csum_flags &= ~CSUM_TSO;
 1637                 } else {
 1638                         /* Request TSO and set MSS. */
 1639                         cflags |= AGE_TD_TSO_IPV4;
 1640                         cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
 1641                         cflags |= ((uint32_t)m->m_pkthdr.tso_segsz <<
 1642                             AGE_TD_TSO_MSS_SHIFT);
 1643                 }
 1644                 /* Set IP/TCP header size. */
 1645                 cflags |= ip->ip_hl << AGE_TD_IPHDR_LEN_SHIFT;
 1646                 cflags |= tcp->th_off << AGE_TD_TSO_TCPHDR_LEN_SHIFT;
 1647         } else if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
 1648                 /* Configure Tx IP/TCP/UDP checksum offload. */
 1649                 cflags |= AGE_TD_CSUM;
 1650                 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
 1651                         cflags |= AGE_TD_TCPCSUM;
 1652                 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 1653                         cflags |= AGE_TD_UDPCSUM;
 1654                 /* Set checksum start offset. */
 1655                 cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
 1656                 /* Set checksum insertion position of TCP/UDP. */
 1657                 cflags |= ((poff + m->m_pkthdr.csum_data) <<
 1658                     AGE_TD_CSUM_XSUMOFFSET_SHIFT);
 1659         }
 1660 
 1661         /* Configure VLAN hardware tag insertion. */
 1662         if ((m->m_flags & M_VLANTAG) != 0) {
 1663                 vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
 1664                 vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
 1665                 cflags |= AGE_TD_INSERT_VLAN_TAG;
 1666         }
 1667 
 1668         desc = NULL;
 1669         for (i = 0; i < nsegs; i++) {
 1670                 desc = &sc->age_rdata.age_tx_ring[prod];
 1671                 desc->addr = htole64(txsegs[i].ds_addr);
 1672                 desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag);
 1673                 desc->flags = htole32(cflags);
 1674                 sc->age_cdata.age_tx_cnt++;
 1675                 AGE_DESC_INC(prod, AGE_TX_RING_CNT);
 1676         }
 1677         /* Update producer index. */
 1678         sc->age_cdata.age_tx_prod = prod;
 1679 
 1680         /* Set EOP on the last descriptor. */
 1681         prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
 1682         desc = &sc->age_rdata.age_tx_ring[prod];
 1683         desc->flags |= htole32(AGE_TD_EOP);
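      /*
       * The index math above steps prod back by one, modulo the ring
       * size: with a 256-entry ring, for example, prod == 0 wraps to
       * (0 + 256 - 1) % 256 == 255, the last descriptor just filled.
       */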
 1684 
 1685         /* Lastly set TSO header and modify IP/TCP header for TSO operation. */
 1686         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 1687                 desc = &sc->age_rdata.age_tx_ring[si];
 1688                 desc->flags |= htole32(AGE_TD_TSO_HDR);
 1689         }
 1690 
 1691         /* Swap the dmamaps of the first and the last descriptor. */
 1692         txd = &sc->age_cdata.age_txdesc[prod];
 1693         map = txd_last->tx_dmamap;
 1694         txd_last->tx_dmamap = txd->tx_dmamap;
 1695         txd->tx_dmamap = map;
 1696         txd->tx_m = m;
 1697 
 1698         /* Sync descriptors. */
 1699         bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE);
 1700         bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
 1701             sc->age_cdata.age_tx_ring_map,
 1702             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1703 
 1704         return (0);
 1705 }
 1706 
 1707 static void
 1708 age_start(struct ifnet *ifp)
 1709 {
 1710         struct age_softc *sc;
 1711 
 1712         sc = ifp->if_softc;
 1713         AGE_LOCK(sc);
 1714         age_start_locked(ifp);
 1715         AGE_UNLOCK(sc);
 1716 }
 1717 
 1718 static void
 1719 age_start_locked(struct ifnet *ifp)
 1720 {
 1721         struct age_softc *sc;
 1722         struct mbuf *m_head;
 1723         int enq;
 1724 
 1725         sc = ifp->if_softc;
 1726 
 1727         AGE_LOCK_ASSERT(sc);
 1728 
 1729         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1730             IFF_DRV_RUNNING || (sc->age_flags & AGE_FLAG_LINK) == 0)
 1731                 return;
 1732 
 1733         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
 1734                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 1735                 if (m_head == NULL)
 1736                         break;
 1737                 /*
 1738                  * Pack the data into the transmit ring. If we
 1739                  * don't have room, set the OACTIVE flag and wait
 1740                  * for the NIC to drain the ring.
 1741                  */
 1742                 if (age_encap(sc, &m_head)) {
 1743                         if (m_head == NULL)
 1744                                 break;
 1745                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 1746                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1747                         break;
 1748                 }
 1749 
 1750                 enq++;
 1751                 /*
 1752                  * If there's a BPF listener, bounce a copy of this frame
 1753                  * to him.
 1754                  */
 1755                 ETHER_BPF_MTAP(ifp, m_head);
 1756         }
 1757 
 1758         if (enq > 0) {
 1759                 /* Update mbox. */
 1760                 AGE_COMMIT_MBOX(sc);
 1761                 /* Set a timeout in case the chip goes out to lunch. */
 1762                 sc->age_watchdog_timer = AGE_TX_TIMEOUT;
 1763         }
 1764 }
 1765 
 1766 static void
 1767 age_watchdog(struct age_softc *sc)
 1768 {
 1769         struct ifnet *ifp;
 1770 
 1771         AGE_LOCK_ASSERT(sc);
 1772 
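      /*
       * This fires only on a 1 -> 0 transition: a timer of zero means
       * the watchdog is disarmed, and a nonzero value after the
       * decrement means it is still counting down.
       */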
 1773         if (sc->age_watchdog_timer == 0 || --sc->age_watchdog_timer)
 1774                 return;
 1775 
 1776         ifp = sc->age_ifp;
 1777         if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
 1778                 if_printf(sc->age_ifp, "watchdog timeout (missed link)\n");
 1779                 ifp->if_oerrors++;
 1780                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1781                 age_init_locked(sc);
 1782                 return;
 1783         }
 1784         if (sc->age_cdata.age_tx_cnt == 0) {
 1785                 if_printf(sc->age_ifp,
 1786                     "watchdog timeout (missed Tx interrupts) -- recovering\n");
 1787                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1788                         age_start_locked(ifp);
 1789                 return;
 1790         }
 1791         if_printf(sc->age_ifp, "watchdog timeout\n");
 1792         ifp->if_oerrors++;
 1793         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1794         age_init_locked(sc);
 1795         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1796                 age_start_locked(ifp);
 1797 }
 1798 
 1799 static int
 1800 age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1801 {
 1802         struct age_softc *sc;
 1803         struct ifreq *ifr;
 1804         struct mii_data *mii;
 1805         uint32_t reg;
 1806         int error, mask;
 1807 
 1808         sc = ifp->if_softc;
 1809         ifr = (struct ifreq *)data;
 1810         error = 0;
 1811         switch (cmd) {
 1812         case SIOCSIFMTU:
 1813                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU)
 1814                         error = EINVAL;
 1815                 else if (ifp->if_mtu != ifr->ifr_mtu) {
 1816                         AGE_LOCK(sc);
 1817                         ifp->if_mtu = ifr->ifr_mtu;
 1818                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1819                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1820                                 age_init_locked(sc);
 1821                         }
 1822                         AGE_UNLOCK(sc);
 1823                 }
 1824                 break;
 1825         case SIOCSIFFLAGS:
 1826                 AGE_LOCK(sc);
 1827                 if ((ifp->if_flags & IFF_UP) != 0) {
 1828                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1829                                 if (((ifp->if_flags ^ sc->age_if_flags)
 1830                                     & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
 1831                                         age_rxfilter(sc);
 1832                         } else {
 1833                                 if ((sc->age_flags & AGE_FLAG_DETACH) == 0)
 1834                                         age_init_locked(sc);
 1835                         }
 1836                 } else {
 1837                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1838                                 age_stop(sc);
 1839                 }
 1840                 sc->age_if_flags = ifp->if_flags;
 1841                 AGE_UNLOCK(sc);
 1842                 break;
 1843         case SIOCADDMULTI:
 1844         case SIOCDELMULTI:
 1845                 AGE_LOCK(sc);
 1846                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1847                         age_rxfilter(sc);
 1848                 AGE_UNLOCK(sc);
 1849                 break;
 1850         case SIOCSIFMEDIA:
 1851         case SIOCGIFMEDIA:
 1852                 mii = device_get_softc(sc->age_miibus);
 1853                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1854                 break;
 1855         case SIOCSIFCAP:
 1856                 AGE_LOCK(sc);
 1857                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
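              /*
               * The XOR yields the set of capability bits the caller
               * asked to toggle; each test below flips a bit only if
               * the hardware advertises it in if_capabilities.
               */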
 1858                 if ((mask & IFCAP_TXCSUM) != 0 &&
 1859                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
 1860                         ifp->if_capenable ^= IFCAP_TXCSUM;
 1861                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1862                                 ifp->if_hwassist |= AGE_CSUM_FEATURES;
 1863                         else
 1864                                 ifp->if_hwassist &= ~AGE_CSUM_FEATURES;
 1865                 }
 1866                 if ((mask & IFCAP_RXCSUM) != 0 &&
 1867                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
 1868                         ifp->if_capenable ^= IFCAP_RXCSUM;
 1869                         reg = CSR_READ_4(sc, AGE_MAC_CFG);
 1870                         reg &= ~MAC_CFG_RXCSUM_ENB;
 1871                         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 1872                                 reg |= MAC_CFG_RXCSUM_ENB;
 1873                         CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
 1874                 }
 1875                 if ((mask & IFCAP_TSO4) != 0 &&
 1876                     (ifp->if_capabilities & IFCAP_TSO4) != 0) {
 1877                         ifp->if_capenable ^= IFCAP_TSO4;
 1878                         if ((ifp->if_capenable & IFCAP_TSO4) != 0)
 1879                                 ifp->if_hwassist |= CSUM_TSO;
 1880                         else
 1881                                 ifp->if_hwassist &= ~CSUM_TSO;
 1882                 }
 1883 
 1884                 if ((mask & IFCAP_WOL_MCAST) != 0 &&
 1885                     (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
 1886                         ifp->if_capenable ^= IFCAP_WOL_MCAST;
 1887                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 1888                     (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
 1889                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 1890                 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
 1891                     (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
 1892                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 1893                 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
 1894                     (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
 1895                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
 1896                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 1897                     (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
 1898                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 1899                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
 1900                                 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
 1901                         age_rxvlan(sc);
 1902                 }
 1903                 AGE_UNLOCK(sc);
 1904                 VLAN_CAPABILITIES(ifp);
 1905                 break;
 1906         default:
 1907                 error = ether_ioctl(ifp, cmd, data);
 1908                 break;
 1909         }
 1910 
 1911         return (error);
 1912 }
 1913 
 1914 static void
 1915 age_mac_config(struct age_softc *sc)
 1916 {
 1917         struct mii_data *mii;
 1918         uint32_t reg;
 1919 
 1920         AGE_LOCK_ASSERT(sc);
 1921 
 1922         mii = device_get_softc(sc->age_miibus);
 1923         reg = CSR_READ_4(sc, AGE_MAC_CFG);
 1924         reg &= ~MAC_CFG_FULL_DUPLEX;
 1925         reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
 1926         reg &= ~MAC_CFG_SPEED_MASK;
 1927         /* Reprogram MAC with resolved speed/duplex. */
 1928         switch (IFM_SUBTYPE(mii->mii_media_active)) {
 1929         case IFM_10_T:
 1930         case IFM_100_TX:
 1931                 reg |= MAC_CFG_SPEED_10_100;
 1932                 break;
 1933         case IFM_1000_T:
 1934                 reg |= MAC_CFG_SPEED_1000;
 1935                 break;
 1936         }
 1937         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
 1938                 reg |= MAC_CFG_FULL_DUPLEX;
 1939 #ifdef notyet
 1940                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
 1941                         reg |= MAC_CFG_TX_FC;
 1942                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
 1943                         reg |= MAC_CFG_RX_FC;
 1944 #endif
 1945         }
 1946 
 1947         CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
 1948 }
 1949 
 1950 static void
 1951 age_link_task(void *arg, int pending)
 1952 {
 1953         struct age_softc *sc;
 1954         struct mii_data *mii;
 1955         struct ifnet *ifp;
 1956         uint32_t reg;
 1957 
 1958         sc = (struct age_softc *)arg;
 1959 
 1960         AGE_LOCK(sc);
 1961         mii = device_get_softc(sc->age_miibus);
 1962         ifp = sc->age_ifp;
 1963         if (mii == NULL || ifp == NULL ||
 1964             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 1965                 AGE_UNLOCK(sc);
 1966                 return;
 1967         }
 1968 
 1969         sc->age_flags &= ~AGE_FLAG_LINK;
 1970         if ((mii->mii_media_status & IFM_AVALID) != 0) {
 1971                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
 1972                 case IFM_10_T:
 1973                 case IFM_100_TX:
 1974                 case IFM_1000_T:
 1975                         sc->age_flags |= AGE_FLAG_LINK;
 1976                         break;
 1977                 default:
 1978                         break;
 1979                 }
 1980         }
 1981 
 1982         /* Stop Rx/Tx MACs. */
 1983         age_stop_rxmac(sc);
 1984         age_stop_txmac(sc);
 1985 
 1986         /* Program MACs with resolved speed/duplex/flow-control. */
 1987         if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
 1988                 age_mac_config(sc);
 1989                 reg = CSR_READ_4(sc, AGE_MAC_CFG);
 1990                 /* Restart DMA engine and Tx/Rx MAC. */
 1991                 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
 1992                     DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
 1993                 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
 1994                 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
 1995         }
 1996 
 1997         AGE_UNLOCK(sc);
 1998 }
 1999 
 2000 static void
 2001 age_stats_update(struct age_softc *sc)
 2002 {
 2003         struct age_stats *stat;
 2004         struct smb *smb;
 2005         struct ifnet *ifp;
 2006 
 2007         AGE_LOCK_ASSERT(sc);
 2008 
 2009         stat = &sc->age_stat;
 2010 
 2011         bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
 2012             sc->age_cdata.age_smb_block_map,
 2013             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2014 
 2015         smb = sc->age_rdata.age_smb_block;
 2016         if (smb->updated == 0)
 2017                 return;
 2018 
 2019         ifp = sc->age_ifp;
 2020         /* Rx stats. */
 2021         stat->rx_frames += smb->rx_frames;
 2022         stat->rx_bcast_frames += smb->rx_bcast_frames;
 2023         stat->rx_mcast_frames += smb->rx_mcast_frames;
 2024         stat->rx_pause_frames += smb->rx_pause_frames;
 2025         stat->rx_control_frames += smb->rx_control_frames;
 2026         stat->rx_crcerrs += smb->rx_crcerrs;
 2027         stat->rx_lenerrs += smb->rx_lenerrs;
 2028         stat->rx_bytes += smb->rx_bytes;
 2029         stat->rx_runts += smb->rx_runts;
 2030         stat->rx_fragments += smb->rx_fragments;
 2031         stat->rx_pkts_64 += smb->rx_pkts_64;
 2032         stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
 2033         stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
 2034         stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
 2035         stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
 2036         stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
 2037         stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
 2038         stat->rx_pkts_truncated += smb->rx_pkts_truncated;
 2039         stat->rx_fifo_oflows += smb->rx_fifo_oflows;
 2040         stat->rx_desc_oflows += smb->rx_desc_oflows;
 2041         stat->rx_alignerrs += smb->rx_alignerrs;
 2042         stat->rx_bcast_bytes += smb->rx_bcast_bytes;
 2043         stat->rx_mcast_bytes += smb->rx_mcast_bytes;
 2044         stat->rx_pkts_filtered += smb->rx_pkts_filtered;
 2045 
 2046         /* Tx stats. */
 2047         stat->tx_frames += smb->tx_frames;
 2048         stat->tx_bcast_frames += smb->tx_bcast_frames;
 2049         stat->tx_mcast_frames += smb->tx_mcast_frames;
 2050         stat->tx_pause_frames += smb->tx_pause_frames;
 2051         stat->tx_excess_defer += smb->tx_excess_defer;
 2052         stat->tx_control_frames += smb->tx_control_frames;
 2053         stat->tx_deferred += smb->tx_deferred;
 2054         stat->tx_bytes += smb->tx_bytes;
 2055         stat->tx_pkts_64 += smb->tx_pkts_64;
 2056         stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
 2057         stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
 2058         stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
 2059         stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
 2060         stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
 2061         stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
 2062         stat->tx_single_colls += smb->tx_single_colls;
 2063         stat->tx_multi_colls += smb->tx_multi_colls;
 2064         stat->tx_late_colls += smb->tx_late_colls;
 2065         stat->tx_excess_colls += smb->tx_excess_colls;
 2066         stat->tx_underrun += smb->tx_underrun;
 2067         stat->tx_desc_underrun += smb->tx_desc_underrun;
 2068         stat->tx_lenerrs += smb->tx_lenerrs;
 2069         stat->tx_pkts_truncated += smb->tx_pkts_truncated;
 2070         stat->tx_bcast_bytes += smb->tx_bcast_bytes;
 2071         stat->tx_mcast_bytes += smb->tx_mcast_bytes;
 2072 
 2073         /* Update counters in ifnet. */
 2074         ifp->if_opackets += smb->tx_frames;
 2075 
 2076         ifp->if_collisions += smb->tx_single_colls +
 2077             smb->tx_multi_colls + smb->tx_late_colls +
 2078             smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
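      /*
       * Each excessive-collision event above is weighted by
       * HDPX_CFG_RETRY_DEFAULT, the half-duplex retry limit that
       * age_init_locked() programs into AGE_HDPX_CFG.
       */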
 2079 
 2080         ifp->if_oerrors += smb->tx_excess_colls +
 2081             smb->tx_late_colls + smb->tx_underrun +
 2082             smb->tx_pkts_truncated;
 2083 
 2084         ifp->if_ipackets += smb->rx_frames;
 2085 
 2086         ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
 2087             smb->rx_runts + smb->rx_pkts_truncated +
 2088             smb->rx_fifo_oflows + smb->rx_desc_oflows +
 2089             smb->rx_alignerrs;
 2090 
 2091         /* Update done, clear. */
 2092         smb->updated = 0;
 2093 
 2094         bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
 2095             sc->age_cdata.age_smb_block_map,
 2096             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2097 }
 2098 
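      /*
       * age_intr() runs as an interrupt filter: it only acknowledges and
       * masks the interrupt, deferring all work to age_int_task(), which
       * runs on the driver taskqueue where taking AGE_LOCK is safe.
       */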
 2099 static int
 2100 age_intr(void *arg)
 2101 {
 2102         struct age_softc *sc;
 2103         uint32_t status;
 2104 
 2105         sc = (struct age_softc *)arg;
 2106 
 2107         status = CSR_READ_4(sc, AGE_INTR_STATUS);
 2108         if (status == 0 || (status & AGE_INTRS) == 0)
 2109                 return (FILTER_STRAY);
 2110         /* Disable interrupts. */
 2111         CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
 2112         taskqueue_enqueue(sc->age_tq, &sc->age_int_task);
 2113 
 2114         return (FILTER_HANDLED);
 2115 }
 2116 
 2117 static void
 2118 age_int_task(void *arg, int pending)
 2119 {
 2120         struct age_softc *sc;
 2121         struct ifnet *ifp;
 2122         struct cmb *cmb;
 2123         uint32_t status;
 2124 
 2125         sc = (struct age_softc *)arg;
 2126 
 2127         AGE_LOCK(sc);
 2128 
 2129         bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
 2130             sc->age_cdata.age_cmb_block_map,
 2131             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2132         cmb = sc->age_rdata.age_cmb_block;
 2133         status = le32toh(cmb->intr_status);
 2134         if (sc->age_morework != 0)
 2135                 status |= INTR_CMB_RX;
 2136         if ((status & AGE_INTRS) == 0)
 2137                 goto done;
 2138 
 2139         sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
 2140             TPD_CONS_SHIFT;
 2141         sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
 2142             RRD_PROD_SHIFT;
 2143         /* Let hardware know CMB was served. */
 2144         cmb->intr_status = 0;
 2145         bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
 2146             sc->age_cdata.age_cmb_block_map,
 2147             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2148 
 2149 #if 0
 2150         printf("INTR: 0x%08x\n", status);
 2151         status &= ~INTR_DIS_DMA;
 2152         CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
 2153 #endif
 2154         ifp = sc->age_ifp;
 2155         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 2156                 if ((status & INTR_CMB_RX) != 0)
 2157                         sc->age_morework = age_rxintr(sc, sc->age_rr_prod,
 2158                             sc->age_process_limit);
 2159                 if ((status & INTR_CMB_TX) != 0)
 2160                         age_txintr(sc, sc->age_tpd_cons);
 2161                 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
 2162                         if ((status & INTR_DMA_RD_TO_RST) != 0)
 2163                                 device_printf(sc->age_dev,
 2164                                     "DMA read error! -- resetting\n");
 2165                         if ((status & INTR_DMA_WR_TO_RST) != 0)
 2166                                 device_printf(sc->age_dev,
 2167                                     "DMA write error! -- resetting\n");
 2168                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2169                         age_init_locked(sc);
 2170                 }
 2171                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2172                         age_start_locked(ifp);
 2173                 if ((status & INTR_SMB) != 0)
 2174                         age_stats_update(sc);
 2175         }
 2176 
 2177         /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
 2178         bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
 2179             sc->age_cdata.age_cmb_block_map,
 2180             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2181         status = le32toh(cmb->intr_status);
 2182         if (sc->age_morework != 0 || (status & AGE_INTRS) != 0) {
 2183                 taskqueue_enqueue(sc->age_tq, &sc->age_int_task);
 2184                 AGE_UNLOCK(sc);
 2185                 return;
 2186         }
 2187 
 2188 done:
 2189         /* Re-enable interrupts. */
 2190         CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
 2191         AGE_UNLOCK(sc);
 2192 }
 2193 
 2194 static void
 2195 age_txintr(struct age_softc *sc, int tpd_cons)
 2196 {
 2197         struct ifnet *ifp;
 2198         struct age_txdesc *txd;
 2199         int cons, prog;
 2200 
 2201         AGE_LOCK_ASSERT(sc);
 2202 
 2203         ifp = sc->age_ifp;
 2204 
 2205         bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
 2206             sc->age_cdata.age_tx_ring_map,
 2207             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2208 
 2209         /*
 2210          * Go through our Tx list and free mbufs for those
 2211          * frames which have been transmitted.
 2212          */
 2213         cons = sc->age_cdata.age_tx_cons;
 2214         for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
 2215                 if (sc->age_cdata.age_tx_cnt <= 0)
 2216                         break;
 2217                 prog++;
 2218                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2219                 sc->age_cdata.age_tx_cnt--;
 2220                 txd = &sc->age_cdata.age_txdesc[cons];
 2221                 /*
 2222                  * Clear Tx descriptors; this is not required but helps
 2223                  * debugging in case of Tx issues.
 2224                  */
 2225                 txd->tx_desc->addr = 0;
 2226                 txd->tx_desc->len = 0;
 2227                 txd->tx_desc->flags = 0;
 2228 
 2229                 if (txd->tx_m == NULL)
 2230                         continue;
 2231                 /* Reclaim transmitted mbufs. */
 2232                 bus_dmamap_sync(sc->age_cdata.age_tx_tag, txd->tx_dmamap,
 2233                     BUS_DMASYNC_POSTWRITE);
 2234                 bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap);
 2235                 m_freem(txd->tx_m);
 2236                 txd->tx_m = NULL;
 2237         }
 2238 
 2239         if (prog > 0) {
 2240                 sc->age_cdata.age_tx_cons = cons;
 2241 
 2242                 /*
 2243                  * Unarm watchdog timer only when there are no pending
 2244                  * Tx descriptors in queue.
 2245                  */
 2246                 if (sc->age_cdata.age_tx_cnt == 0)
 2247                         sc->age_watchdog_timer = 0;
 2248                 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
 2249                     sc->age_cdata.age_tx_ring_map,
 2250                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2251         }
 2252 }
 2253 
 2254 /* Receive a frame. */
 2255 static void
 2256 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
 2257 {
 2258         struct age_rxdesc *rxd;
 2259         struct rx_desc *desc;
 2260         struct ifnet *ifp;
 2261         struct mbuf *mp, *m;
 2262         uint32_t status, index, vtag;
 2263         int count, nsegs, pktlen;
 2264         int rx_cons;
 2265 
 2266         AGE_LOCK_ASSERT(sc);
 2267 
 2268         ifp = sc->age_ifp;
 2269         status = le32toh(rxrd->flags);
 2270         index = le32toh(rxrd->index);
 2271         rx_cons = AGE_RX_CONS(index);
 2272         nsegs = AGE_RX_NSEGS(index);
 2273 
 2274         sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
 2275         if ((status & AGE_RRD_ERROR) != 0 &&
 2276             (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
 2277             AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
 2278                 /*
 2279                  * We want to pass the following frames to the upper
 2280                  * layer regardless of the error status of the Rx
 2281                  * return ring:
 2282                  *
 2283                  *  o IP/TCP/UDP checksum is bad.
 2284                  *  o frame length and protocol-specific length do
 2285                  *     not match.
 2286                  */
 2287                 sc->age_cdata.age_rx_cons += nsegs;
 2288                 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
 2289                 return;
 2290         }
 2291 
 2292         pktlen = 0;
 2293         for (count = 0; count < nsegs; count++,
 2294             AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
 2295                 rxd = &sc->age_cdata.age_rxdesc[rx_cons];
 2296                 mp = rxd->rx_m;
 2297                 desc = rxd->rx_desc;
 2298                 /* Add a new receive buffer to the ring. */
 2299                 if (age_newbuf(sc, rxd) != 0) {
 2300                         ifp->if_iqdrops++;
 2301                         /* Reuse Rx buffers. */
 2302                         if (sc->age_cdata.age_rxhead != NULL) {
 2303                                 m_freem(sc->age_cdata.age_rxhead);
 2304                                 AGE_RXCHAIN_RESET(sc);
 2305                         }
 2306                         break;
 2307                 }
 2308 
 2309                 /* The length of the first mbuf is computed last. */
 2310                 if (count != 0) {
 2311                         mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
 2312                         pktlen += mp->m_len;
 2313                 }
 2314 
 2315                 /* Chain received mbufs. */
 2316                 if (sc->age_cdata.age_rxhead == NULL) {
 2317                         sc->age_cdata.age_rxhead = mp;
 2318                         sc->age_cdata.age_rxtail = mp;
 2319                 } else {
 2320                         mp->m_flags &= ~M_PKTHDR;
 2321                         sc->age_cdata.age_rxprev_tail =
 2322                             sc->age_cdata.age_rxtail;
 2323                         sc->age_cdata.age_rxtail->m_next = mp;
 2324                         sc->age_cdata.age_rxtail = mp;
 2325                 }
 2326 
 2327                 if (count == nsegs - 1) {
 2328                         /*
 2329                          * It seems that the L1 controller has no way
 2330                          * to tell the hardware to strip CRC bytes.
 2331                          */
 2332                         sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
 2333                         if (nsegs > 1) {
 2334                                 /* Remove the CRC bytes in chained mbufs. */
 2335                                 pktlen -= ETHER_CRC_LEN;
 2336                                 if (mp->m_len <= ETHER_CRC_LEN) {
 2337                                         sc->age_cdata.age_rxtail =
 2338                                             sc->age_cdata.age_rxprev_tail;
 2339                                         sc->age_cdata.age_rxtail->m_len -=
 2340                                             (ETHER_CRC_LEN - mp->m_len);
 2341                                         sc->age_cdata.age_rxtail->m_next = NULL;
 2342                                         m_freem(mp);
 2343                                 } else {
 2344                                         mp->m_len -= ETHER_CRC_LEN;
 2345                                 }
 2346                         }
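                              /*
                               * For example, if only two of the four CRC bytes
                               * landed in the final mbuf (mp->m_len == 2), that
                               * mbuf is freed outright and the remaining two
                               * CRC bytes are trimmed from the new tail.
                               */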
 2347 
 2348                         m = sc->age_cdata.age_rxhead;
 2349                         m->m_flags |= M_PKTHDR;
 2350                         m->m_pkthdr.rcvif = ifp;
 2351                         m->m_pkthdr.len = sc->age_cdata.age_rxlen;
 2352                         /* Set the first mbuf length. */
 2353                         m->m_len = sc->age_cdata.age_rxlen - pktlen;
 2354 
 2355                         /*
 2356                          * Set checksum information.
 2357                          * It seems that the L1 controller can compute a
 2358                          * partial checksum, which can be used to accelerate
 2359                          * checksum computation for fragmented TCP/UDP
 2360                          * packets. The upper network stack already takes
 2361                          * advantage of the partial checksum value in the IP
 2362                          * reassembly stage. But I'm not sure about the
 2363                          * correctness of the partial hardware checksum
 2364                          * assistance due to the lack of a data sheet. If it
 2365                          * is proven to work on the L1, I'll enable it.
 2366                          */
 2367                         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
 2368                             (status & AGE_RRD_IPV4) != 0) {
 2369                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2370                                 if ((status & AGE_RRD_IPCSUM_NOK) == 0)
 2371                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2372                                 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
 2373                                     (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
 2374                                         m->m_pkthdr.csum_flags |=
 2375                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 2376                                         m->m_pkthdr.csum_data = 0xffff;
 2377                                 }
 2378                                 /*
 2379                                  * Don't mark a bad checksum for TCP/UDP frames,
 2380                                  * as fragmented frames may always have the
 2381                                  * bad-checksum bit set in the descriptor status.
 2382                                  */
 2383                         }
 2384 
 2385                         /* Check for VLAN tagged frames. */
 2386                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
 2387                             (status & AGE_RRD_VLAN) != 0) {
 2388                                 vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
 2389                                 m->m_pkthdr.ether_vtag = AGE_RX_VLAN_TAG(vtag);
 2390                                 m->m_flags |= M_VLANTAG;
 2391                         }
 2392 
 2393                         /* Pass it on. */
 2394                         AGE_UNLOCK(sc);
 2395                         (*ifp->if_input)(ifp, m);
 2396                         AGE_LOCK(sc);
 2397 
 2398                         /* Reset mbuf chains. */
 2399                         AGE_RXCHAIN_RESET(sc);
 2400                 }
 2401         }
 2402 
 2403         if (count != nsegs) {
 2404                 sc->age_cdata.age_rx_cons += nsegs;
 2405                 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
 2406         } else
 2407                 sc->age_cdata.age_rx_cons = rx_cons;
 2408 }
 2409 
 2410 static int
 2411 age_rxintr(struct age_softc *sc, int rr_prod, int count)
 2412 {
 2413         struct rx_rdesc *rxrd;
 2414         int rr_cons, nsegs, pktlen, prog;
 2415 
 2416         AGE_LOCK_ASSERT(sc);
 2417 
 2418         rr_cons = sc->age_cdata.age_rr_cons;
 2419         if (rr_cons == rr_prod)
 2420                 return (0);
 2421 
 2422         bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
 2423             sc->age_cdata.age_rr_ring_map,
 2424             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2425         bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
 2426             sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_POSTWRITE);
 2427 
 2428         for (prog = 0; rr_cons != rr_prod; prog++) {
 2429                 if (count-- <= 0)
 2430                         break;
 2431                 rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
 2432                 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
 2433                 if (nsegs == 0)
 2434                         break;
 2435                 /*
 2436                  * Check the number of segments against the received
 2437                  * bytes. A non-matching value would indicate that the
 2438                  * hardware is still updating the Rx return descriptors.
 2439                  * I'm not sure whether this check is really needed.
 2440                  */
 2441                 pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
 2442                 if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
 2443                     (MCLBYTES - ETHER_ALIGN)))
 2444                         break;
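              /*
               * The comparison above is a ceiling division: each Rx
               * buffer holds MCLBYTES - ETHER_ALIGN bytes (2046 with
               * 2KB clusters), so a 3000-byte frame, for example, must
               * arrive in ceil(3000 / 2046) == 2 segments.
               */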
 2445 
 2447                 /* Received a frame. */
 2448                 age_rxeof(sc, rxrd);
 2449                 /* Clear return ring. */
 2450                 rxrd->index = 0;
 2451                 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
 2452         }
 2453 
 2454         if (prog > 0) {
 2455                 /* Update the consumer index. */
 2456                 sc->age_cdata.age_rr_cons = rr_cons;
 2457 
 2458                 bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
 2459                     sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE);
 2460                 /* Sync descriptors. */
 2461                 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
 2462                     sc->age_cdata.age_rr_ring_map,
 2463                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2464 
 2465                 /* Notify hardware availability of new Rx buffers. */
 2466                 AGE_COMMIT_MBOX(sc);
 2467         }
 2468 
 2469         return (count > 0 ? 0 : EAGAIN);
 2470 }
 2471 
 2472 static void
 2473 age_tick(void *arg)
 2474 {
 2475         struct age_softc *sc;
 2476         struct mii_data *mii;
 2477 
 2478         sc = (struct age_softc *)arg;
 2479 
 2480         AGE_LOCK_ASSERT(sc);
 2481 
 2482         mii = device_get_softc(sc->age_miibus);
 2483         mii_tick(mii);
 2484         age_watchdog(sc);
 2485         callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
 2486 }
 2487 
 2488 static void
 2489 age_reset(struct age_softc *sc)
 2490 {
 2491         uint32_t reg;
 2492         int i;
 2493 
 2494         CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
 2495         CSR_READ_4(sc, AGE_MASTER_CFG);
 2496         DELAY(1000);
 2497         for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
 2498                 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
 2499                         break;
 2500                 DELAY(10);
 2501         }
 2502 
 2503         if (i == 0)
 2504                 device_printf(sc->age_dev, "reset timeout (0x%08x)!\n", reg);
 2505         /* Initialize PCIe module. From Linux. */
 2506         CSR_WRITE_4(sc, 0x12FC, 0x6500);
 2507         CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
 2508 }
 2509 
 2510 static void
 2511 age_init(void *xsc)
 2512 {
 2513         struct age_softc *sc;
 2514 
 2515         sc = (struct age_softc *)xsc;
 2516         AGE_LOCK(sc);
 2517         age_init_locked(sc);
 2518         AGE_UNLOCK(sc);
 2519 }
 2520 
 2521 static void
 2522 age_init_locked(struct age_softc *sc)
 2523 {
 2524         struct ifnet *ifp;
 2525         struct mii_data *mii;
 2526         uint8_t eaddr[ETHER_ADDR_LEN];
 2527         bus_addr_t paddr;
 2528         uint32_t reg, fsize;
 2529         uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
 2530         int error;
 2531 
 2532         AGE_LOCK_ASSERT(sc);
 2533 
 2534         ifp = sc->age_ifp;
 2535         mii = device_get_softc(sc->age_miibus);
 2536 
 2537         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 2538                 return;
 2539 
 2540         /*
 2541          * Cancel any pending I/O.
 2542          */
 2543         age_stop(sc);
 2544 
 2545         /*
 2546          * Reset the chip to a known state.
 2547          */
 2548         age_reset(sc);
 2549 
 2550         /* Initialize descriptors. */
 2551         error = age_init_rx_ring(sc);
 2552         if (error != 0) {
 2553                 device_printf(sc->age_dev, "no memory for Rx buffers.\n");
 2554                 age_stop(sc);
 2555                 return;
 2556         }
 2557         age_init_rr_ring(sc);
 2558         age_init_tx_ring(sc);
 2559         age_init_cmb_block(sc);
 2560         age_init_smb_block(sc);
 2561 
 2562         /* Reprogram the station address. */
 2563         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
 2564         CSR_WRITE_4(sc, AGE_PAR0,
 2565             eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
 2566         CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
 2567 
 2568         /* Set descriptor base addresses. */
 2569         paddr = sc->age_rdata.age_tx_ring_paddr;
 2570         CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
 2571         paddr = sc->age_rdata.age_rx_ring_paddr;
 2572         CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
 2573         paddr = sc->age_rdata.age_rr_ring_paddr;
 2574         CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
 2575         paddr = sc->age_rdata.age_tx_ring_paddr;
 2576         CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
 2577         paddr = sc->age_rdata.age_cmb_block_paddr;
 2578         CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
 2579         paddr = sc->age_rdata.age_smb_block_paddr;
 2580         CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
 2581         /* Set Rx/Rx return descriptor counter. */
 2582         CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
 2583             ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
 2584             DESC_RRD_CNT_MASK) |
 2585             ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
 2586         /* Set Tx descriptor counter. */
 2587         CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
 2588             (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
 2589 
 2590         /* Tell hardware that we're ready to load descriptors. */
 2591         CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
 2592 
 2593         /*
 2594          * Initialize mailbox register.
 2595          * Updated producer/consumer index information is exchanged
 2596          * through this mailbox register. However, the Tx producer
 2597          * index and the Rx return consumer/Rx producer indices all
 2598          * share one register, so it's hard to separate the Tx and
 2599          * Rx code paths without locking. If the L1 hardware had
 2600          * separate mailbox registers for Tx and Rx consumer/producer
 2601          * management, we could have independent Tx/Rx handlers, and
 2602          * the Rx handler could then run without any locking.
 2603          */
 2604         AGE_COMMIT_MBOX(sc);
 2605 
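        /*
         * AGE_COMMIT_MBOX() is defined in the driver's header; it
         * conceptually packs all three shared indices into the single
         * mailbox doorbell with one register write, roughly like the
         * sketch below (the register and shift/mask names here are
         * illustrative, not taken from if_agereg.h):
         *
         *      CSR_WRITE_4(sc, AGE_MBOX,
         *          ((sc->age_cdata.age_rx_cons << MBOX_RD_PROD_IDX_SHIFT) &
         *          MBOX_RD_PROD_IDX_MASK) |
         *          ((sc->age_cdata.age_rr_cons << MBOX_RRD_CONS_IDX_SHIFT) &
         *          MBOX_RRD_CONS_IDX_MASK) |
         *          ((sc->age_cdata.age_tx_prod << MBOX_TD_PROD_IDX_SHIFT) &
         *          MBOX_TD_PROD_IDX_MASK));
         *
         * Because a single write always publishes all three indices,
         * every caller must hold the softc lock, which is exactly the
         * constraint described in the comment above.
         */
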
 2606         /* Configure IPG/IFG parameters. */
 2607         CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
 2608             ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
 2609             ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
 2610             ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
 2611             ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
 2612 
 2613         /* Set parameters for half-duplex media. */
 2614         CSR_WRITE_4(sc, AGE_HDPX_CFG,
 2615             ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
 2616             HDPX_CFG_LCOL_MASK) |
 2617             ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
 2618             HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
 2619             ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
 2620             HDPX_CFG_ABEBT_MASK) |
 2621             ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
 2622             HDPX_CFG_JAMIPG_MASK));
 2623 
 2624         /* Configure interrupt moderation timer. */
 2625         CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
 2626         reg = CSR_READ_4(sc, AGE_MASTER_CFG);
 2627         reg &= ~MASTER_MTIMER_ENB;
 2628         if (AGE_USECS(sc->age_int_mod) == 0)
 2629                 reg &= ~MASTER_ITIMER_ENB;
 2630         else
 2631                 reg |= MASTER_ITIMER_ENB;
 2632         CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
 2633         if (bootverbose)
 2634                 device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
 2635                     sc->age_int_mod);
 2636         CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
 2637 
 2638         /* Set maximum frame size but don't let the MTU be less than ETHERMTU. */
 2639         if (ifp->if_mtu < ETHERMTU)
 2640                 sc->age_max_frame_size = ETHERMTU;
 2641         else
 2642                 sc->age_max_frame_size = ifp->if_mtu;
 2643         sc->age_max_frame_size += ETHER_HDR_LEN +
 2644             sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
 2645         CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
 2646         /* Configure jumbo frame. */
 2647         fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
 2648         CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
 2649             (((fsize / sizeof(uint64_t)) <<
 2650             RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
 2651             ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
 2652             RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
 2653             ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
 2654             RXQ_JUMBO_CFG_RRD_TIMER_MASK));
 2655 
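        /*
         * Worked example, assuming the stock 1500-byte MTU: the frame
         * budget is 1500 + ETHER_HDR_LEN (14) +
         * sizeof(struct ether_vlan_header) (18) + ETHER_CRC_LEN (4) =
         * 1536 bytes.  1536 is already a multiple of sizeof(uint64_t),
         * so fsize stays 1536 and the jumbo size threshold above is
         * programmed as 1536 / 8 = 192 quadwords.
         */
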
 2656         /* Configure flow-control parameters. From Linux. */
 2657         if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
 2658                 /*
 2659                  * Magic workaround for the old L1.
 2660                  * It is not known which hw revision requires this magic.
 2661                  */
 2662                 CSR_WRITE_4(sc, 0x12FC, 0x6500);
 2663                 /*
 2664                  * Another magic workaround for flow-control mode
 2665                  * change. From Linux.
 2666                  */
 2667                 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
 2668         }
 2669         /*
 2670          * TODO
 2671          *  Understand how the pause parameters relate to the FIFO
 2672          *  size and the number of Rx and Rx return descriptors.
 2673          *
 2674          *  Magic parameters came from Linux.
 2675          */
 2676         switch (sc->age_chip_rev) {
 2677         case 0x8001:
 2678         case 0x9001:
 2679         case 0x9002:
 2680         case 0x9003:
 2681                 rxf_hi = AGE_RX_RING_CNT / 16;
 2682                 rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
 2683                 rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
 2684                 rrd_lo = AGE_RR_RING_CNT / 16;
 2685                 break;
 2686         default:
 2687                 reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
 2688                 rxf_lo = reg / 16;
 2689                 if (rxf_lo < 192)
 2690                         rxf_lo = 192;
 2691                 rxf_hi = (reg * 7) / 8;
 2692                 if (rxf_hi < rxf_lo)
 2693                         rxf_hi = rxf_lo + 16;
 2694                 reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
 2695                 rrd_lo = reg / 8;
 2696                 rrd_hi = (reg * 7) / 8;
 2697                 if (rrd_lo < 2)
 2698                         rrd_lo = 2;
 2699                 if (rrd_hi < rrd_lo)
 2700                         rrd_hi = rrd_lo + 3;
 2701                 break;
 2702         }
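        /*
         * For illustration only: if AGE_RX_RING_CNT were 256 and
         * AGE_RR_RING_CNT were 1024 (the real values live in
         * if_agevar.h), the known-revision path above would yield
         * rxf_hi = 16, rxf_lo = 224, rrd_hi = 896 and rrd_lo = 64.
         */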
 2703         CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
 2704             ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
 2705             RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
 2706             ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
 2707             RXQ_FIFO_PAUSE_THRESH_HI_MASK));
 2708         CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
 2709             ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
 2710             RXQ_RRD_PAUSE_THRESH_LO_MASK) |
 2711             ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
 2712             RXQ_RRD_PAUSE_THRESH_HI_MASK));
 2713 
 2714         /* Configure RxQ. */
 2715         CSR_WRITE_4(sc, AGE_RXQ_CFG,
 2716             ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
 2717             RXQ_CFG_RD_BURST_MASK) |
 2718             ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
 2719             RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
 2720             ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
 2721             RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
 2722             RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
 2723 
 2724         /* Configure TxQ. */
 2725         CSR_WRITE_4(sc, AGE_TXQ_CFG,
 2726             ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
 2727             TXQ_CFG_TPD_BURST_MASK) |
 2728             ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
 2729             TXQ_CFG_TX_FIFO_BURST_MASK) |
 2730             ((TXQ_CFG_TPD_FETCH_DEFAULT <<
 2731             TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
 2732             TXQ_CFG_ENB);
 2733 
 2734         CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG,
 2735             (((fsize / sizeof(uint64_t) << TX_JUMBO_TPD_TH_SHIFT)) &
 2736             TX_JUMBO_TPD_TH_MASK) |
 2737             ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) &
 2738             TX_JUMBO_TPD_IPG_MASK));
 2739         /* Configure DMA parameters. */
 2740         CSR_WRITE_4(sc, AGE_DMA_CFG,
 2741             DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
 2742             sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
 2743             sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
 2744 
 2745         /* Configure CMB DMA write threshold. */
 2746         CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
 2747             ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
 2748             CMB_WR_THRESH_RRD_MASK) |
 2749             ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
 2750             CMB_WR_THRESH_TPD_MASK));
 2751 
 2752         /* Set CMB/SMB timer and enable them. */
 2753         CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
 2754             ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
 2755             ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
 2756         /* Request SMB updates every second. */
 2757         CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
 2758         CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
 2759 
 2760         /*
 2761          * Disable all WOL bits, as WOL can interfere with normal
 2762          * Rx operation.
 2763          */
 2764         CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
 2765 
 2766         /*
 2767          * Configure Tx/Rx MACs.
 2768          *  - Auto-padding for short frames.
 2769          *  - Enable CRC generation.
 2770          *  Start with full-duplex/1000Mbps media; the actual MAC
 2771          *  reconfiguration follows once the link is established.
 2772          */
 2773         CSR_WRITE_4(sc, AGE_MAC_CFG,
 2774             MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
 2775             MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
 2776             ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
 2777             MAC_CFG_PREAMBLE_MASK));
 2778         /* Set up the receive filter. */
 2779         age_rxfilter(sc);
 2780         age_rxvlan(sc);
 2781 
 2782         reg = CSR_READ_4(sc, AGE_MAC_CFG);
 2783         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 2784                 reg |= MAC_CFG_RXCSUM_ENB;
 2785 
 2786         /* Ack all pending interrupts and clear them. */
 2787         CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
 2788         CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
 2789 
 2790         /* Finally enable Tx/Rx MAC. */
 2791         CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
 2792 
 2793         sc->age_flags &= ~AGE_FLAG_LINK;
 2794         /* Switch to the current media. */
 2795         mii_mediachg(mii);
 2796 
 2797         callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
 2798 
 2799         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2800         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2801 }
 2802 
 2803 static void
 2804 age_stop(struct age_softc *sc)
 2805 {
 2806         struct ifnet *ifp;
 2807         struct age_txdesc *txd;
 2808         struct age_rxdesc *rxd;
 2809         uint32_t reg;
 2810         int i;
 2811 
 2812         AGE_LOCK_ASSERT(sc);
 2813         /*
 2814          * Mark the interface down and cancel the watchdog timer.
 2815          */
 2816         ifp = sc->age_ifp;
 2817         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 2818         sc->age_flags &= ~AGE_FLAG_LINK;
 2819         callout_stop(&sc->age_tick_ch);
 2820         sc->age_watchdog_timer = 0;
 2821 
 2822         /*
 2823          * Disable interrupts.
 2824          */
 2825         CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
 2826         CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
 2827         /* Stop CMB/SMB updates. */
 2828         CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
 2829         /* Stop Rx/Tx MAC. */
 2830         age_stop_rxmac(sc);
 2831         age_stop_txmac(sc);
 2832         /* Stop DMA. */
 2833         CSR_WRITE_4(sc, AGE_DMA_CFG,
 2834             CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
 2835         /* Stop TxQ/RxQ. */
 2836         CSR_WRITE_4(sc, AGE_TXQ_CFG,
 2837             CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
 2838         CSR_WRITE_4(sc, AGE_RXQ_CFG,
 2839             CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
 2840         for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
 2841                 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
 2842                         break;
 2843                 DELAY(10);
 2844         }
 2845         if (i == 0)
 2846                 device_printf(sc->age_dev,
 2847                     "stopping Rx/Tx MACs timed out (0x%08x)!\n", reg);
 2848 
 2849         /* Reclaim Rx buffers that have been processed. */
 2850         if (sc->age_cdata.age_rxhead != NULL)
 2851                 m_freem(sc->age_cdata.age_rxhead);
 2852         AGE_RXCHAIN_RESET(sc);
 2853         /*
 2854          * Free RX and TX mbufs still in the queues.
 2855          */
 2856         for (i = 0; i < AGE_RX_RING_CNT; i++) {
 2857                 rxd = &sc->age_cdata.age_rxdesc[i];
 2858                 if (rxd->rx_m != NULL) {
 2859                         bus_dmamap_sync(sc->age_cdata.age_rx_tag,
 2860                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 2861                         bus_dmamap_unload(sc->age_cdata.age_rx_tag,
 2862                             rxd->rx_dmamap);
 2863                         m_freem(rxd->rx_m);
 2864                         rxd->rx_m = NULL;
 2865                 }
 2866         }
 2867         for (i = 0; i < AGE_TX_RING_CNT; i++) {
 2868                 txd = &sc->age_cdata.age_txdesc[i];
 2869                 if (txd->tx_m != NULL) {
 2870                         bus_dmamap_sync(sc->age_cdata.age_tx_tag,
 2871                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 2872                         bus_dmamap_unload(sc->age_cdata.age_tx_tag,
 2873                             txd->tx_dmamap);
 2874                         m_freem(txd->tx_m);
 2875                         txd->tx_m = NULL;
 2876                 }
 2877         }
 2878 }
 2879 
 2880 static void
 2881 age_stop_txmac(struct age_softc *sc)
 2882 {
 2883         uint32_t reg;
 2884         int i;
 2885 
 2886         AGE_LOCK_ASSERT(sc);
 2887 
 2888         reg = CSR_READ_4(sc, AGE_MAC_CFG);
 2889         if ((reg & MAC_CFG_TX_ENB) != 0) {
 2890                 reg &= ~MAC_CFG_TX_ENB;
 2891                 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
 2892         }
 2893         /* Stop Tx DMA engine. */
 2894         reg = CSR_READ_4(sc, AGE_DMA_CFG);
 2895         if ((reg & DMA_CFG_RD_ENB) != 0) {
 2896                 reg &= ~DMA_CFG_RD_ENB;
 2897                 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
 2898         }
 2899         for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
 2900                 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
 2901                     (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
 2902                         break;
 2903                 DELAY(10);
 2904         }
 2905         if (i == 0)
 2906                 device_printf(sc->age_dev, "stopping TxMAC timed out!\n");
 2907 }
 2908 
 2909 static void
 2910 age_stop_rxmac(struct age_softc *sc)
 2911 {
 2912         uint32_t reg;
 2913         int i;
 2914 
 2915         AGE_LOCK_ASSERT(sc);
 2916 
 2917         reg = CSR_READ_4(sc, AGE_MAC_CFG);
 2918         if ((reg & MAC_CFG_RX_ENB) != 0) {
 2919                 reg &= ~MAC_CFG_RX_ENB;
 2920                 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
 2921         }
 2922         /* Stop Rx DMA engine. */
 2923         reg = CSR_READ_4(sc, AGE_DMA_CFG);
 2924         if ((reg & DMA_CFG_WR_ENB) != 0) {
 2925                 reg &= ~DMA_CFG_WR_ENB;
 2926                 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
 2927         }
 2928         for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
 2929                 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
 2930                     (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
 2931                         break;
 2932                 DELAY(10);
 2933         }
 2934         if (i == 0)
 2935                 device_printf(sc->age_dev, "stopping RxMAC timed out!\n");
 2936 }
 2937 
 2938 static void
 2939 age_init_tx_ring(struct age_softc *sc)
 2940 {
 2941         struct age_ring_data *rd;
 2942         struct age_txdesc *txd;
 2943         int i;
 2944 
 2945         AGE_LOCK_ASSERT(sc);
 2946 
 2947         sc->age_cdata.age_tx_prod = 0;
 2948         sc->age_cdata.age_tx_cons = 0;
 2949         sc->age_cdata.age_tx_cnt = 0;
 2950 
 2951         rd = &sc->age_rdata;
 2952         bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
 2953         for (i = 0; i < AGE_TX_RING_CNT; i++) {
 2954                 txd = &sc->age_cdata.age_txdesc[i];
 2955                 txd->tx_desc = &rd->age_tx_ring[i];
 2956                 txd->tx_m = NULL;
 2957         }
 2958 
 2959         bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
 2960             sc->age_cdata.age_tx_ring_map,
 2961             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2962 }
 2963 
 2964 static int
 2965 age_init_rx_ring(struct age_softc *sc)
 2966 {
 2967         struct age_ring_data *rd;
 2968         struct age_rxdesc *rxd;
 2969         int i;
 2970 
 2971         AGE_LOCK_ASSERT(sc);
 2972 
 2973         sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
 2974         sc->age_morework = 0;
 2975         rd = &sc->age_rdata;
 2976         bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
 2977         for (i = 0; i < AGE_RX_RING_CNT; i++) {
 2978                 rxd = &sc->age_cdata.age_rxdesc[i];
 2979                 rxd->rx_m = NULL;
 2980                 rxd->rx_desc = &rd->age_rx_ring[i];
 2981                 if (age_newbuf(sc, rxd) != 0)
 2982                         return (ENOBUFS);
 2983         }
 2984 
 2985         bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
 2986             sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE);
 2987 
 2988         return (0);
 2989 }
 2990 
 2991 static void
 2992 age_init_rr_ring(struct age_softc *sc)
 2993 {
 2994         struct age_ring_data *rd;
 2995 
 2996         AGE_LOCK_ASSERT(sc);
 2997 
 2998         sc->age_cdata.age_rr_cons = 0;
 2999         AGE_RXCHAIN_RESET(sc);
 3000 
 3001         rd = &sc->age_rdata;
 3002         bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
 3003         bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
 3004             sc->age_cdata.age_rr_ring_map,
 3005             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3006 }
 3007 
 3008 static void
 3009 age_init_cmb_block(struct age_softc *sc)
 3010 {
 3011         struct age_ring_data *rd;
 3012 
 3013         AGE_LOCK_ASSERT(sc);
 3014 
 3015         rd = &sc->age_rdata;
 3016         bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
 3017         bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
 3018             sc->age_cdata.age_cmb_block_map,
 3019             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3020 }
 3021 
 3022 static void
 3023 age_init_smb_block(struct age_softc *sc)
 3024 {
 3025         struct age_ring_data *rd;
 3026 
 3027         AGE_LOCK_ASSERT(sc);
 3028 
 3029         rd = &sc->age_rdata;
 3030         bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
 3031         bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
 3032             sc->age_cdata.age_smb_block_map,
 3033             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3034 }
 3035 
 3036 static int
 3037 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
 3038 {
 3039         struct rx_desc *desc;
 3040         struct mbuf *m;
 3041         bus_dma_segment_t segs[1];
 3042         bus_dmamap_t map;
 3043         int nsegs;
 3044 
 3045         AGE_LOCK_ASSERT(sc);
 3046 
 3047         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 3048         if (m == NULL)
 3049                 return (ENOBUFS);
 3050         m->m_len = m->m_pkthdr.len = MCLBYTES;
 3051         m_adj(m, ETHER_ALIGN);
 3052 
 3053         if (bus_dmamap_load_mbuf_sg(sc->age_cdata.age_rx_tag,
 3054             sc->age_cdata.age_rx_sparemap, m, segs, &nsegs, 0) != 0) {
 3055                 m_freem(m);
 3056                 return (ENOBUFS);
 3057         }
 3058         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 3059 
 3060         if (rxd->rx_m != NULL) {
 3061                 bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
 3062                     BUS_DMASYNC_POSTREAD);
 3063                 bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap);
 3064         }
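        /*
         * Swap the freshly loaded spare map into the ring slot and
         * recycle the slot's old map as the new spare, so a failed
         * load above never leaves the descriptor without a valid
         * DMA map.
         */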
 3065         map = rxd->rx_dmamap;
 3066         rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
 3067         sc->age_cdata.age_rx_sparemap = map;
 3068         bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
 3069             BUS_DMASYNC_PREREAD);
 3070         rxd->rx_m = m;
 3071 
 3072         desc = rxd->rx_desc;
 3073         desc->addr = htole64(segs[0].ds_addr);
 3074         desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) <<
 3075             AGE_RD_LEN_SHIFT);
 3076         return (0);
 3077 }
 3078 
 3079 static void
 3080 age_rxvlan(struct age_softc *sc)
 3081 {
 3082         struct ifnet *ifp;
 3083         uint32_t reg;
 3084 
 3085         AGE_LOCK_ASSERT(sc);
 3086 
 3087         ifp = sc->age_ifp;
 3088         reg = CSR_READ_4(sc, AGE_MAC_CFG);
 3089         reg &= ~MAC_CFG_VLAN_TAG_STRIP;
 3090         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3091                 reg |= MAC_CFG_VLAN_TAG_STRIP;
 3092         CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
 3093 }
 3094 
 3095 static void
 3096 age_rxfilter(struct age_softc *sc)
 3097 {
 3098         struct ifnet *ifp;
 3099         struct ifmultiaddr *ifma;
 3100         uint32_t crc;
 3101         uint32_t mchash[2];
 3102         uint32_t rxcfg;
 3103 
 3104         AGE_LOCK_ASSERT(sc);
 3105 
 3106         ifp = sc->age_ifp;
 3107 
 3108         rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
 3109         rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
 3110         if ((ifp->if_flags & IFF_BROADCAST) != 0)
 3111                 rxcfg |= MAC_CFG_BCAST;
 3112         if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
 3113                 if ((ifp->if_flags & IFF_PROMISC) != 0)
 3114                         rxcfg |= MAC_CFG_PROMISC;
 3115                 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
 3116                         rxcfg |= MAC_CFG_ALLMULTI;
 3117                 CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF);
 3118                 CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF);
 3119                 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
 3120                 return;
 3121         }
 3122 
 3123         /* Program new filter. */
 3124         bzero(mchash, sizeof(mchash));
 3125 
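        /*
         * The most-significant bit of the big-endian CRC selects one
         * of the two 32-bit MAR words and the next five bits select
         * the bit within that word, so the top six CRC bits form a
         * 0-63 multicast hash index.
         */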
 3126         if_maddr_rlock(ifp);
 3127         TAILQ_FOREACH(ifma, &sc->age_ifp->if_multiaddrs, ifma_link) {
 3128                 if (ifma->ifma_addr->sa_family != AF_LINK)
 3129                         continue;
 3130                 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
 3131                     ifma->ifma_addr), ETHER_ADDR_LEN);
 3132                 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
 3133         }
 3134         if_maddr_runlock(ifp);
 3135 
 3136         CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
 3137         CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
 3138         CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
 3139 }
 3140 
 3141 static int
 3142 sysctl_age_stats(SYSCTL_HANDLER_ARGS)
 3143 {
 3144         struct age_softc *sc;
 3145         struct age_stats *stats;
 3146         int error, result;
 3147 
 3148         result = -1;
 3149         error = sysctl_handle_int(oidp, &result, 0, req);
 3150 
 3151         if (error != 0 || req->newptr == NULL)
 3152                 return (error);
 3153 
 3154         if (result != 1)
 3155                 return (error);
 3156 
 3157         sc = (struct age_softc *)arg1;
 3158         stats = &sc->age_stat;
 3159         printf("%s statistics:\n", device_get_nameunit(sc->age_dev));
 3160         printf("Transmit good frames : %ju\n",
 3161             (uintmax_t)stats->tx_frames);
 3162         printf("Transmit good broadcast frames : %ju\n",
 3163             (uintmax_t)stats->tx_bcast_frames);
 3164         printf("Transmit good multicast frames : %ju\n",
 3165             (uintmax_t)stats->tx_mcast_frames);
 3166         printf("Transmit pause control frames : %u\n",
 3167             stats->tx_pause_frames);
 3168         printf("Transmit control frames : %u\n",
 3169             stats->tx_control_frames);
 3170         printf("Transmit frames with excessive deferrals : %u\n",
 3171             stats->tx_excess_defer);
 3172         printf("Transmit deferrals : %u\n",
 3173             stats->tx_deferred);
 3174         printf("Transmit good octets : %ju\n",
 3175             (uintmax_t)stats->tx_bytes);
 3176         printf("Transmit good broadcast octets : %ju\n",
 3177             (uintmax_t)stats->tx_bcast_bytes);
 3178         printf("Transmit good multicast octets : %ju\n",
 3179             (uintmax_t)stats->tx_mcast_bytes);
 3180         printf("Transmit frames 64 bytes : %ju\n",
 3181             (uintmax_t)stats->tx_pkts_64);
 3182         printf("Transmit frames 65 to 127 bytes : %ju\n",
 3183             (uintmax_t)stats->tx_pkts_65_127);
 3184         printf("Transmit frames 128 to 255 bytes : %ju\n",
 3185             (uintmax_t)stats->tx_pkts_128_255);
 3186         printf("Transmit frames 256 to 511 bytes : %ju\n",
 3187             (uintmax_t)stats->tx_pkts_256_511);
 3188         printf("Transmit frames 512 to 1024 bytes : %ju\n",
 3189             (uintmax_t)stats->tx_pkts_512_1023);
 3190         printf("Transmit frames 1024 to 1518 bytes : %ju\n",
 3191             (uintmax_t)stats->tx_pkts_1024_1518);
 3192         printf("Transmit frames 1519 to MTU bytes : %ju\n",
 3193             (uintmax_t)stats->tx_pkts_1519_max);
 3194         printf("Transmit single collisions : %u\n",
 3195             stats->tx_single_colls);
 3196         printf("Transmit multiple collisions : %u\n",
 3197             stats->tx_multi_colls);
 3198         printf("Transmit late collisions : %u\n",
 3199             stats->tx_late_colls);
 3200         printf("Transmit abort due to excessive collisions : %u\n",
 3201             stats->tx_excess_colls);
 3202         printf("Transmit underruns due to FIFO underruns : %u\n",
 3203             stats->tx_underrun);
 3204         printf("Transmit descriptor write-back errors : %u\n",
 3205             stats->tx_desc_underrun);
 3206         printf("Transmit frames with mismatched frame length : %u\n",
 3207             stats->tx_lenerrs);
 3208         printf("Transmit frames truncated due to MTU size : %u\n",
 3209             stats->tx_lenerrs);
 3210 
 3211         printf("Receive good frames : %ju\n",
 3212             (uintmax_t)stats->rx_frames);
 3213         printf("Receive good broadcast frames : %ju\n",
 3214             (uintmax_t)stats->rx_bcast_frames);
 3215         printf("Receive good multicast frames : %ju\n",
 3216             (uintmax_t)stats->rx_mcast_frames);
 3217         printf("Receive pause control frames : %u\n",
 3218             stats->rx_pause_frames);
 3219         printf("Receive control frames : %u\n",
 3220             stats->rx_control_frames);
 3221         printf("Receive CRC errors : %u\n",
 3222             stats->rx_crcerrs);
 3223         printf("Receive frames with length errors : %u\n",
 3224             stats->rx_lenerrs);
 3225         printf("Receive good octets : %ju\n",
 3226             (uintmax_t)stats->rx_bytes);
 3227         printf("Receive good broadcast octets : %ju\n",
 3228             (uintmax_t)stats->rx_bcast_bytes);
 3229         printf("Receive good multicast octets : %ju\n",
 3230             (uintmax_t)stats->rx_mcast_bytes);
 3231         printf("Receive frames too short : %u\n",
 3232             stats->rx_runts);
 3233         printf("Receive fragmented frames : %ju\n",
 3234             (uintmax_t)stats->rx_fragments);
 3235         printf("Receive frames 64 bytes : %ju\n",
 3236             (uintmax_t)stats->rx_pkts_64);
 3237         printf("Receive frames 65 to 127 bytes : %ju\n",
 3238             (uintmax_t)stats->rx_pkts_65_127);
 3239         printf("Receive frames 128 to 255 bytes : %ju\n",
 3240             (uintmax_t)stats->rx_pkts_128_255);
 3241         printf("Receive frames 256 to 511 bytes : %ju\n",
 3242             (uintmax_t)stats->rx_pkts_256_511);
 3243         printf("Receive frames 512 to 1024 bytes : %ju\n",
 3244             (uintmax_t)stats->rx_pkts_512_1023);
 3245         printf("Receive frames 1024 to 1518 bytes : %ju\n",
 3246             (uintmax_t)stats->rx_pkts_1024_1518);
 3247         printf("Receive frames 1519 to MTU bytes : %ju\n",
 3248             (uintmax_t)stats->rx_pkts_1519_max);
 3249         printf("Receive frames too long : %ju\n",
 3250             (uintmax_t)stats->rx_pkts_truncated);
 3251         printf("Receive frames with FIFO overflow : %u\n",
 3252             stats->rx_fifo_oflows);
 3253         printf("Receive frames with return descriptor overflow : %u\n",
 3254             stats->rx_desc_oflows);
 3255         printf("Receive frames with alignment errors : %u\n",
 3256             stats->rx_alignerrs);
 3257         printf("Receive frames dropped due to address filtering : %ju\n",
 3258             (uintmax_t)stats->rx_pkts_filtered);
 3259 
 3260         return (error);
 3261 }
 3262 
 3263 static int
 3264 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 3265 {
 3266         int error, value;
 3267 
 3268         if (arg1 == NULL)
 3269                 return (EINVAL);
 3270         value = *(int *)arg1;
 3271         error = sysctl_handle_int(oidp, &value, 0, req);
 3272         if (error || req->newptr == NULL)
 3273                 return (error);
 3274         if (value < low || value > high)
 3275                 return (EINVAL);
 3276         *(int *)arg1 = value;
 3277 
 3278         return (0);
 3279 }
 3280 
 3281 static int
 3282 sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS)
 3283 {
 3284         return (sysctl_int_range(oidp, arg1, arg2, req,
 3285             AGE_PROC_MIN, AGE_PROC_MAX));
 3286 }
 3287 
 3288 static int
 3289 sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS)
 3290 {
 3291 
 3292         return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN,
 3293             AGE_IM_TIMER_MAX));
 3294 }
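
/*
 * A minimal sketch of how the range-checked handlers above are wired
 * up from device attach; the node name and description here are
 * illustrative, the actual registration lives in age_attach():
 *
 *      SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
 *          SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)),
 *          OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW,
 *          &sc->age_int_mod, 0, sysctl_hw_age_int_mod, "I",
 *          "age interrupt moderation");
 */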
