The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/stge/if_stge.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $    */
    2 
    3 /*-
    4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
    5  *
    6  * Copyright (c) 2001 The NetBSD Foundation, Inc.
    7  * All rights reserved.
    8  *
    9  * This code is derived from software contributed to The NetBSD Foundation
   10  * by Jason R. Thorpe.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  *
   21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   31  * POSSIBILITY OF SUCH DAMAGE.
   32  */
   33 
   34 /*
   35  * Device driver for the Sundance Tech. TC9021 10/100/1000
   36  * Ethernet controller.
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD$");
   41 
   42 #ifdef HAVE_KERNEL_OPTION_HEADERS
   43 #include "opt_device_polling.h"
   44 #endif
   45 
   46 #include <sys/param.h>
   47 #include <sys/systm.h>
   48 #include <sys/endian.h>
   49 #include <sys/mbuf.h>
   50 #include <sys/malloc.h>
   51 #include <sys/kernel.h>
   52 #include <sys/module.h>
   53 #include <sys/socket.h>
   54 #include <sys/sockio.h>
   55 #include <sys/sysctl.h>
   56 #include <sys/taskqueue.h>
   57 
   58 #include <net/bpf.h>
   59 #include <net/ethernet.h>
   60 #include <net/if.h>
   61 #include <net/if_var.h>
   62 #include <net/if_dl.h>
   63 #include <net/if_media.h>
   64 #include <net/if_types.h>
   65 #include <net/if_vlan_var.h>
   66 
   67 #include <machine/bus.h>
   68 #include <machine/resource.h>
   69 #include <sys/bus.h>
   70 #include <sys/rman.h>
   71 
   72 #include <dev/mii/mii.h>
   73 #include <dev/mii/mii_bitbang.h>
   74 #include <dev/mii/miivar.h>
   75 
   76 #include <dev/pci/pcireg.h>
   77 #include <dev/pci/pcivar.h>
   78 
   79 #include <dev/stge/if_stgereg.h>
   80 
   81 #define STGE_CSUM_FEATURES      (CSUM_IP | CSUM_TCP | CSUM_UDP)
   82 
   83 MODULE_DEPEND(stge, pci, 1, 1, 1);
   84 MODULE_DEPEND(stge, ether, 1, 1, 1);
   85 MODULE_DEPEND(stge, miibus, 1, 1, 1);
   86 
   87 /* "device miibus" required.  See GENERIC if you get errors here. */
   88 #include "miibus_if.h"
   89 
   90 /*
   91  * Devices supported by this driver.
   92  */
   93 static const struct stge_product {
   94         uint16_t        stge_vendorid;
   95         uint16_t        stge_deviceid;
   96         const char      *stge_name;
   97 } stge_products[] = {
   98         { VENDOR_SUNDANCETI,    DEVICEID_SUNDANCETI_ST1023,
   99           "Sundance ST-1023 Gigabit Ethernet" },
  100 
  101         { VENDOR_SUNDANCETI,    DEVICEID_SUNDANCETI_ST2021,
  102           "Sundance ST-2021 Gigabit Ethernet" },
  103 
  104         { VENDOR_TAMARACK,      DEVICEID_TAMARACK_TC9021,
  105           "Tamarack TC9021 Gigabit Ethernet" },
  106 
  107         { VENDOR_TAMARACK,      DEVICEID_TAMARACK_TC9021_ALT,
  108           "Tamarack TC9021 Gigabit Ethernet" },
  109 
  110         /*
  111          * The Sundance sample boards use the Sundance vendor ID,
  112          * but the Tamarack product ID.
  113          */
  114         { VENDOR_SUNDANCETI,    DEVICEID_TAMARACK_TC9021,
  115           "Sundance TC9021 Gigabit Ethernet" },
  116 
  117         { VENDOR_SUNDANCETI,    DEVICEID_TAMARACK_TC9021_ALT,
  118           "Sundance TC9021 Gigabit Ethernet" },
  119 
  120         { VENDOR_DLINK,         DEVICEID_DLINK_DL4000,
  121           "D-Link DL-4000 Gigabit Ethernet" },
  122 
  123         { VENDOR_ANTARES,       DEVICEID_ANTARES_TC9021,
  124           "Antares Gigabit Ethernet" }
  125 };
  126 
  127 static int      stge_probe(device_t);
  128 static int      stge_attach(device_t);
  129 static int      stge_detach(device_t);
  130 static int      stge_shutdown(device_t);
  131 static int      stge_suspend(device_t);
  132 static int      stge_resume(device_t);
  133 
  134 static int      stge_encap(struct stge_softc *, struct mbuf **);
  135 static void     stge_start(struct ifnet *);
  136 static void     stge_start_locked(struct ifnet *);
  137 static void     stge_watchdog(struct stge_softc *);
  138 static int      stge_ioctl(struct ifnet *, u_long, caddr_t);
  139 static void     stge_init(void *);
  140 static void     stge_init_locked(struct stge_softc *);
  141 static void     stge_vlan_setup(struct stge_softc *);
  142 static void     stge_stop(struct stge_softc *);
  143 static void     stge_start_tx(struct stge_softc *);
  144 static void     stge_start_rx(struct stge_softc *);
  145 static void     stge_stop_tx(struct stge_softc *);
  146 static void     stge_stop_rx(struct stge_softc *);
  147 
  148 static void     stge_reset(struct stge_softc *, uint32_t);
  149 static int      stge_eeprom_wait(struct stge_softc *);
  150 static void     stge_read_eeprom(struct stge_softc *, int, uint16_t *);
  151 static void     stge_tick(void *);
  152 static void     stge_stats_update(struct stge_softc *);
  153 static void     stge_set_filter(struct stge_softc *);
  154 static void     stge_set_multi(struct stge_softc *);
  155 
  156 static void     stge_link_task(void *, int);
  157 static void     stge_intr(void *);
  158 static __inline int stge_tx_error(struct stge_softc *);
  159 static void     stge_txeof(struct stge_softc *);
  160 static int      stge_rxeof(struct stge_softc *);
  161 static __inline void stge_discard_rxbuf(struct stge_softc *, int);
  162 static int      stge_newbuf(struct stge_softc *, int);
  163 #ifndef __NO_STRICT_ALIGNMENT
  164 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
  165 #endif
  166 
  167 static int      stge_miibus_readreg(device_t, int, int);
  168 static int      stge_miibus_writereg(device_t, int, int, int);
  169 static void     stge_miibus_statchg(device_t);
  170 static int      stge_mediachange(struct ifnet *);
  171 static void     stge_mediastatus(struct ifnet *, struct ifmediareq *);
  172 
  173 static void     stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  174 static int      stge_dma_alloc(struct stge_softc *);
  175 static void     stge_dma_free(struct stge_softc *);
  176 static void     stge_dma_wait(struct stge_softc *);
  177 static void     stge_init_tx_ring(struct stge_softc *);
  178 static int      stge_init_rx_ring(struct stge_softc *);
  179 #ifdef DEVICE_POLLING
  180 static int      stge_poll(struct ifnet *, enum poll_cmd, int);
  181 #endif
  182 
  183 static void     stge_setwol(struct stge_softc *);
  184 static int      sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
  185 static int      sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
  186 static int      sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
  187 
  188 /*
  189  * MII bit-bang glue
  190  */
  191 static uint32_t stge_mii_bitbang_read(device_t);
  192 static void     stge_mii_bitbang_write(device_t, uint32_t);
  193 
  194 static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
  195         stge_mii_bitbang_read,
  196         stge_mii_bitbang_write,
  197         {
  198                 PC_MgmtData,            /* MII_BIT_MDO */
  199                 PC_MgmtData,            /* MII_BIT_MDI */
  200                 PC_MgmtClk,             /* MII_BIT_MDC */
  201                 PC_MgmtDir,             /* MII_BIT_DIR_HOST_PHY */
  202                 0,                      /* MII_BIT_DIR_PHY_HOST */
  203         }
  204 };
  205 
  206 static device_method_t stge_methods[] = {
  207         /* Device interface */
  208         DEVMETHOD(device_probe,         stge_probe),
  209         DEVMETHOD(device_attach,        stge_attach),
  210         DEVMETHOD(device_detach,        stge_detach),
  211         DEVMETHOD(device_shutdown,      stge_shutdown),
  212         DEVMETHOD(device_suspend,       stge_suspend),
  213         DEVMETHOD(device_resume,        stge_resume),
  214 
  215         /* MII interface */
  216         DEVMETHOD(miibus_readreg,       stge_miibus_readreg),
  217         DEVMETHOD(miibus_writereg,      stge_miibus_writereg),
  218         DEVMETHOD(miibus_statchg,       stge_miibus_statchg),
  219 
  220         DEVMETHOD_END
  221 };
  222 
  223 static driver_t stge_driver = {
  224         "stge",
  225         stge_methods,
  226         sizeof(struct stge_softc)
  227 };
  228 
  229 DRIVER_MODULE(stge, pci, stge_driver, 0, 0);
  230 DRIVER_MODULE(miibus, stge, miibus_driver, 0, 0);
  231 
  232 static struct resource_spec stge_res_spec_io[] = {
  233         { SYS_RES_IOPORT,       PCIR_BAR(0),    RF_ACTIVE },
  234         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  235         { -1,                   0,              0 }
  236 };
  237 
  238 static struct resource_spec stge_res_spec_mem[] = {
  239         { SYS_RES_MEMORY,       PCIR_BAR(1),    RF_ACTIVE },
  240         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  241         { -1,                   0,              0 }
  242 };
  243 
  244 /*
  245  * stge_mii_bitbang_read: [mii bit-bang interface function]
  246  *
  247  *      Read the MII serial port for the MII bit-bang module.
  248  */
  249 static uint32_t
  250 stge_mii_bitbang_read(device_t dev)
  251 {
  252         struct stge_softc *sc;
  253         uint32_t val;
  254 
  255         sc = device_get_softc(dev);
  256 
  257         val = CSR_READ_1(sc, STGE_PhyCtrl);
  258         CSR_BARRIER(sc, STGE_PhyCtrl, 1,
  259             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  260         return (val);
  261 }
  262 
  263 /*
  264  * stge_mii_bitbang_write: [mii big-bang interface function]
  265  *
  266  *      Write the MII serial port for the MII bit-bang module.
  267  */
  268 static void
  269 stge_mii_bitbang_write(device_t dev, uint32_t val)
  270 {
  271         struct stge_softc *sc;
  272 
  273         sc = device_get_softc(dev);
  274 
  275         CSR_WRITE_1(sc, STGE_PhyCtrl, val);
  276         CSR_BARRIER(sc, STGE_PhyCtrl, 1,
  277             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  278 }
  279 
  280 /*
  281  * sc_miibus_readreg:   [mii interface function]
  282  *
  283  *      Read a PHY register on the MII of the TC9021.
  284  */
  285 static int
  286 stge_miibus_readreg(device_t dev, int phy, int reg)
  287 {
  288         struct stge_softc *sc;
  289         int error, val;
  290 
  291         sc = device_get_softc(dev);
  292 
  293         if (reg == STGE_PhyCtrl) {
  294                 /* XXX allow ip1000phy read STGE_PhyCtrl register. */
  295                 STGE_MII_LOCK(sc);
  296                 error = CSR_READ_1(sc, STGE_PhyCtrl);
  297                 STGE_MII_UNLOCK(sc);
  298                 return (error);
  299         }
  300 
  301         STGE_MII_LOCK(sc);
  302         val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
  303         STGE_MII_UNLOCK(sc);
  304         return (val);
  305 }
  306 
  307 /*
  308  * stge_miibus_writereg:        [mii interface function]
  309  *
  310  *      Write a PHY register on the MII of the TC9021.
  311  */
  312 static int
  313 stge_miibus_writereg(device_t dev, int phy, int reg, int val)
  314 {
  315         struct stge_softc *sc;
  316 
  317         sc = device_get_softc(dev);
  318 
  319         STGE_MII_LOCK(sc);
  320         mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val);
  321         STGE_MII_UNLOCK(sc);
  322         return (0);
  323 }
  324 
  325 /*
  326  * stge_miibus_statchg: [mii interface function]
  327  *
  328  *      Callback from MII layer when media changes.
  329  */
  330 static void
  331 stge_miibus_statchg(device_t dev)
  332 {
  333         struct stge_softc *sc;
  334 
  335         sc = device_get_softc(dev);
  336         taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
  337 }
  338 
  339 /*
  340  * stge_mediastatus:    [ifmedia interface function]
  341  *
  342  *      Get the current interface media status.
  343  */
  344 static void
  345 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
  346 {
  347         struct stge_softc *sc;
  348         struct mii_data *mii;
  349 
  350         sc = ifp->if_softc;
  351         mii = device_get_softc(sc->sc_miibus);
  352 
  353         mii_pollstat(mii);
  354         ifmr->ifm_status = mii->mii_media_status;
  355         ifmr->ifm_active = mii->mii_media_active;
  356 }
  357 
  358 /*
  359  * stge_mediachange:    [ifmedia interface function]
  360  *
  361  *      Set hardware to newly-selected media.
  362  */
  363 static int
  364 stge_mediachange(struct ifnet *ifp)
  365 {
  366         struct stge_softc *sc;
  367         struct mii_data *mii;
  368 
  369         sc = ifp->if_softc;
  370         mii = device_get_softc(sc->sc_miibus);
  371         mii_mediachg(mii);
  372 
  373         return (0);
  374 }
  375 
  376 static int
  377 stge_eeprom_wait(struct stge_softc *sc)
  378 {
  379         int i;
  380 
  381         for (i = 0; i < STGE_TIMEOUT; i++) {
  382                 DELAY(1000);
  383                 if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
  384                         return (0);
  385         }
  386         return (1);
  387 }
  388 
  389 /*
  390  * stge_read_eeprom:
  391  *
  392  *      Read data from the serial EEPROM.
  393  */
  394 static void
  395 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
  396 {
  397 
  398         if (stge_eeprom_wait(sc))
  399                 device_printf(sc->sc_dev, "EEPROM failed to come ready\n");
  400 
  401         CSR_WRITE_2(sc, STGE_EepromCtrl,
  402             EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
  403         if (stge_eeprom_wait(sc))
  404                 device_printf(sc->sc_dev, "EEPROM read timed out\n");
  405         *data = CSR_READ_2(sc, STGE_EepromData);
  406 }
  407 
  408 static int
  409 stge_probe(device_t dev)
  410 {
  411         const struct stge_product *sp;
  412         int i;
  413         uint16_t vendor, devid;
  414 
  415         vendor = pci_get_vendor(dev);
  416         devid = pci_get_device(dev);
  417         sp = stge_products;
  418         for (i = 0; i < nitems(stge_products); i++, sp++) {
  419                 if (vendor == sp->stge_vendorid &&
  420                     devid == sp->stge_deviceid) {
  421                         device_set_desc(dev, sp->stge_name);
  422                         return (BUS_PROBE_DEFAULT);
  423                 }
  424         }
  425 
  426         return (ENXIO);
  427 }
  428 
  429 static int
  430 stge_attach(device_t dev)
  431 {
  432         struct stge_softc *sc;
  433         struct ifnet *ifp;
  434         uint8_t enaddr[ETHER_ADDR_LEN];
  435         int error, flags, i;
  436         uint16_t cmd;
  437         uint32_t val;
  438 
  439         error = 0;
  440         sc = device_get_softc(dev);
  441         sc->sc_dev = dev;
  442 
  443         mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  444             MTX_DEF);
  445         mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
  446         callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
  447         TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);
  448 
  449         /*
  450          * Map the device.
  451          */
  452         pci_enable_busmaster(dev);
  453         cmd = pci_read_config(dev, PCIR_COMMAND, 2);
  454         val = pci_read_config(dev, PCIR_BAR(1), 4);
  455         if (PCI_BAR_IO(val))
  456                 sc->sc_spec = stge_res_spec_mem;
  457         else {
  458                 val = pci_read_config(dev, PCIR_BAR(0), 4);
  459                 if (!PCI_BAR_IO(val)) {
  460                         device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
  461                         error = ENXIO;
  462                         goto fail;
  463                 }
  464                 sc->sc_spec = stge_res_spec_io;
  465         }
  466         error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
  467         if (error != 0) {
  468                 device_printf(dev, "couldn't allocate %s resources\n",
  469                     sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
  470                 goto fail;
  471         }
  472         sc->sc_rev = pci_get_revid(dev);
  473 
  474         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  475             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
  476             "rxint_nframe", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
  477             &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe, "I",
  478             "stge rx interrupt nframe");
  479 
  480         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
  481             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
  482             "rxint_dmawait", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
  483             &sc->sc_rxint_dmawait, 0, sysctl_hw_stge_rxint_dmawait, "I",
  484             "stge rx interrupt dmawait");
  485 
  486         /* Pull in device tunables. */
  487         sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
  488         error = resource_int_value(device_get_name(dev), device_get_unit(dev),
  489             "rxint_nframe", &sc->sc_rxint_nframe);
  490         if (error == 0) {
  491                 if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
  492                     sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
  493                         device_printf(dev, "rxint_nframe value out of range; "
  494                             "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
  495                         sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
  496                 }
  497         }
  498 
  499         sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
  500         error = resource_int_value(device_get_name(dev), device_get_unit(dev),
  501             "rxint_dmawait", &sc->sc_rxint_dmawait);
  502         if (error == 0) {
  503                 if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
  504                     sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
  505                         device_printf(dev, "rxint_dmawait value out of range; "
  506                             "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
  507                         sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
  508                 }
  509         }
  510 
  511         if ((error = stge_dma_alloc(sc)) != 0)
  512                 goto fail;
  513 
  514         /*
  515          * Determine if we're copper or fiber.  It affects how we
  516          * reset the card.
  517          */
  518         if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
  519                 sc->sc_usefiber = 1;
  520         else
  521                 sc->sc_usefiber = 0;
  522 
  523         /* Load LED configuration from EEPROM. */
  524         stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);
  525 
  526         /*
  527          * Reset the chip to a known state.
  528          */
  529         STGE_LOCK(sc);
  530         stge_reset(sc, STGE_RESET_FULL);
  531         STGE_UNLOCK(sc);
  532 
  533         /*
  534          * Reading the station address from the EEPROM doesn't seem
  535          * to work, at least on my sample boards.  Instead, since
  536          * the reset sequence does AutoInit, read it from the station
  537          * address registers. For Sundance 1023 you can only read it
  538          * from EEPROM.
  539          */
  540         if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
  541                 uint16_t v;
  542 
  543                 v = CSR_READ_2(sc, STGE_StationAddress0);
  544                 enaddr[0] = v & 0xff;
  545                 enaddr[1] = v >> 8;
  546                 v = CSR_READ_2(sc, STGE_StationAddress1);
  547                 enaddr[2] = v & 0xff;
  548                 enaddr[3] = v >> 8;
  549                 v = CSR_READ_2(sc, STGE_StationAddress2);
  550                 enaddr[4] = v & 0xff;
  551                 enaddr[5] = v >> 8;
  552                 sc->sc_stge1023 = 0;
  553         } else {
  554                 uint16_t myaddr[ETHER_ADDR_LEN / 2];
  555                 for (i = 0; i <ETHER_ADDR_LEN / 2; i++) {
  556                         stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
  557                             &myaddr[i]);
  558                         myaddr[i] = le16toh(myaddr[i]);
  559                 }
  560                 bcopy(myaddr, enaddr, sizeof(enaddr));
  561                 sc->sc_stge1023 = 1;
  562         }
  563 
  564         ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
  565         if (ifp == NULL) {
  566                 device_printf(sc->sc_dev, "failed to if_alloc()\n");
  567                 error = ENXIO;
  568                 goto fail;
  569         }
  570 
  571         ifp->if_softc = sc;
  572         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  573         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  574         ifp->if_ioctl = stge_ioctl;
  575         ifp->if_start = stge_start;
  576         ifp->if_init = stge_init;
  577         ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
  578         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
  579         IFQ_SET_READY(&ifp->if_snd);
  580         /* Revision B3 and earlier chips have checksum bug. */
  581         if (sc->sc_rev >= 0x0c) {
  582                 ifp->if_hwassist = STGE_CSUM_FEATURES;
  583                 ifp->if_capabilities = IFCAP_HWCSUM;
  584         } else {
  585                 ifp->if_hwassist = 0;
  586                 ifp->if_capabilities = 0;
  587         }
  588         ifp->if_capabilities |= IFCAP_WOL_MAGIC;
  589         ifp->if_capenable = ifp->if_capabilities;
  590 
  591         /*
  592          * Read some important bits from the PhyCtrl register.
  593          */
  594         sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
  595             (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
  596 
  597         /* Set up MII bus. */
  598         flags = MIIF_DOPAUSE;
  599         if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e)
  600                 flags |= MIIF_MACPRIV0;
  601         error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange,
  602             stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
  603             flags);
  604         if (error != 0) {
  605                 device_printf(sc->sc_dev, "attaching PHYs failed\n");
  606                 goto fail;
  607         }
  608 
  609         ether_ifattach(ifp, enaddr);
  610 
  611         /* VLAN capability setup */
  612         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
  613         if (sc->sc_rev >= 0x0c)
  614                 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
  615         ifp->if_capenable = ifp->if_capabilities;
  616 #ifdef DEVICE_POLLING
  617         ifp->if_capabilities |= IFCAP_POLLING;
  618 #endif
  619         /*
  620          * Tell the upper layer(s) we support long frames.
  621          * Must appear after the call to ether_ifattach() because
  622          * ether_ifattach() sets ifi_hdrlen to the default value.
  623          */
  624         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
  625 
  626         /*
  627          * The manual recommends disabling early transmit, so we
  628          * do.  It's disabled anyway, if using IP checksumming,
  629          * since the entire packet must be in the FIFO in order
  630          * for the chip to perform the checksum.
  631          */
  632         sc->sc_txthresh = 0x0fff;
  633 
  634         /*
  635          * Disable MWI if the PCI layer tells us to.
  636          */
  637         sc->sc_DMACtrl = 0;
  638         if ((cmd & PCIM_CMD_MWRICEN) == 0)
  639                 sc->sc_DMACtrl |= DMAC_MWIDisable;
  640 
  641         /*
  642          * Hookup IRQ
  643          */
  644         error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
  645             NULL, stge_intr, sc, &sc->sc_ih);
  646         if (error != 0) {
  647                 ether_ifdetach(ifp);
  648                 device_printf(sc->sc_dev, "couldn't set up IRQ\n");
  649                 sc->sc_ifp = NULL;
  650                 goto fail;
  651         }
  652 
  653 fail:
  654         if (error != 0)
  655                 stge_detach(dev);
  656 
  657         return (error);
  658 }
  659 
  660 static int
  661 stge_detach(device_t dev)
  662 {
  663         struct stge_softc *sc;
  664         struct ifnet *ifp;
  665 
  666         sc = device_get_softc(dev);
  667 
  668         ifp = sc->sc_ifp;
  669 #ifdef DEVICE_POLLING
  670         if (ifp && ifp->if_capenable & IFCAP_POLLING)
  671                 ether_poll_deregister(ifp);
  672 #endif
  673         if (device_is_attached(dev)) {
  674                 STGE_LOCK(sc);
  675                 /* XXX */
  676                 sc->sc_detach = 1;
  677                 stge_stop(sc);
  678                 STGE_UNLOCK(sc);
  679                 callout_drain(&sc->sc_tick_ch);
  680                 taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
  681                 ether_ifdetach(ifp);
  682         }
  683 
  684         if (sc->sc_miibus != NULL) {
  685                 device_delete_child(dev, sc->sc_miibus);
  686                 sc->sc_miibus = NULL;
  687         }
  688         bus_generic_detach(dev);
  689         stge_dma_free(sc);
  690 
  691         if (ifp != NULL) {
  692                 if_free(ifp);
  693                 sc->sc_ifp = NULL;
  694         }
  695 
  696         if (sc->sc_ih) {
  697                 bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
  698                 sc->sc_ih = NULL;
  699         }
  700 
  701         if (sc->sc_spec)
  702                 bus_release_resources(dev, sc->sc_spec, sc->sc_res);
  703 
  704         mtx_destroy(&sc->sc_mii_mtx);
  705         mtx_destroy(&sc->sc_mtx);
  706 
  707         return (0);
  708 }
  709 
  710 struct stge_dmamap_arg {
  711         bus_addr_t      stge_busaddr;
  712 };
  713 
  714 static void
  715 stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  716 {
  717         struct stge_dmamap_arg *ctx;
  718 
  719         if (error != 0)
  720                 return;
  721 
  722         ctx = (struct stge_dmamap_arg *)arg;
  723         ctx->stge_busaddr = segs[0].ds_addr;
  724 }
  725 
  726 static int
  727 stge_dma_alloc(struct stge_softc *sc)
  728 {
  729         struct stge_dmamap_arg ctx;
  730         struct stge_txdesc *txd;
  731         struct stge_rxdesc *rxd;
  732         int error, i;
  733 
  734         /* create parent tag. */
  735         error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
  736                     1, 0,                       /* algnmnt, boundary */
  737                     STGE_DMA_MAXADDR,           /* lowaddr */
  738                     BUS_SPACE_MAXADDR,          /* highaddr */
  739                     NULL, NULL,                 /* filter, filterarg */
  740                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
  741                     0,                          /* nsegments */
  742                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
  743                     0,                          /* flags */
  744                     NULL, NULL,                 /* lockfunc, lockarg */
  745                     &sc->sc_cdata.stge_parent_tag);
  746         if (error != 0) {
  747                 device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
  748                 goto fail;
  749         }
  750         /* create tag for Tx ring. */
  751         error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
  752                     STGE_RING_ALIGN, 0,         /* algnmnt, boundary */
  753                     BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
  754                     BUS_SPACE_MAXADDR,          /* highaddr */
  755                     NULL, NULL,                 /* filter, filterarg */
  756                     STGE_TX_RING_SZ,            /* maxsize */
  757                     1,                          /* nsegments */
  758                     STGE_TX_RING_SZ,            /* maxsegsize */
  759                     0,                          /* flags */
  760                     NULL, NULL,                 /* lockfunc, lockarg */
  761                     &sc->sc_cdata.stge_tx_ring_tag);
  762         if (error != 0) {
  763                 device_printf(sc->sc_dev,
  764                     "failed to allocate Tx ring DMA tag\n");
  765                 goto fail;
  766         }
  767 
  768         /* create tag for Rx ring. */
  769         error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
  770                     STGE_RING_ALIGN, 0,         /* algnmnt, boundary */
  771                     BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
  772                     BUS_SPACE_MAXADDR,          /* highaddr */
  773                     NULL, NULL,                 /* filter, filterarg */
  774                     STGE_RX_RING_SZ,            /* maxsize */
  775                     1,                          /* nsegments */
  776                     STGE_RX_RING_SZ,            /* maxsegsize */
  777                     0,                          /* flags */
  778                     NULL, NULL,                 /* lockfunc, lockarg */
  779                     &sc->sc_cdata.stge_rx_ring_tag);
  780         if (error != 0) {
  781                 device_printf(sc->sc_dev,
  782                     "failed to allocate Rx ring DMA tag\n");
  783                 goto fail;
  784         }
  785 
  786         /* create tag for Tx buffers. */
  787         error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
  788                     1, 0,                       /* algnmnt, boundary */
  789                     BUS_SPACE_MAXADDR,          /* lowaddr */
  790                     BUS_SPACE_MAXADDR,          /* highaddr */
  791                     NULL, NULL,                 /* filter, filterarg */
  792                     MCLBYTES * STGE_MAXTXSEGS,  /* maxsize */
  793                     STGE_MAXTXSEGS,             /* nsegments */
  794                     MCLBYTES,                   /* maxsegsize */
  795                     0,                          /* flags */
  796                     NULL, NULL,                 /* lockfunc, lockarg */
  797                     &sc->sc_cdata.stge_tx_tag);
  798         if (error != 0) {
  799                 device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
  800                 goto fail;
  801         }
  802 
  803         /* create tag for Rx buffers. */
  804         error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
  805                     1, 0,                       /* algnmnt, boundary */
  806                     BUS_SPACE_MAXADDR,          /* lowaddr */
  807                     BUS_SPACE_MAXADDR,          /* highaddr */
  808                     NULL, NULL,                 /* filter, filterarg */
  809                     MCLBYTES,                   /* maxsize */
  810                     1,                          /* nsegments */
  811                     MCLBYTES,                   /* maxsegsize */
  812                     0,                          /* flags */
  813                     NULL, NULL,                 /* lockfunc, lockarg */
  814                     &sc->sc_cdata.stge_rx_tag);
  815         if (error != 0) {
  816                 device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
  817                 goto fail;
  818         }
  819 
  820         /* allocate DMA'able memory and load the DMA map for Tx ring. */
  821         error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
  822             (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT |
  823             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map);
  824         if (error != 0) {
  825                 device_printf(sc->sc_dev,
  826                     "failed to allocate DMA'able memory for Tx ring\n");
  827                 goto fail;
  828         }
  829 
  830         ctx.stge_busaddr = 0;
  831         error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
  832             sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
  833             STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
  834         if (error != 0 || ctx.stge_busaddr == 0) {
  835                 device_printf(sc->sc_dev,
  836                     "failed to load DMA'able memory for Tx ring\n");
  837                 goto fail;
  838         }
  839         sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;
  840 
  841         /* allocate DMA'able memory and load the DMA map for Rx ring. */
  842         error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
  843             (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT |
  844             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map);
  845         if (error != 0) {
  846                 device_printf(sc->sc_dev,
  847                     "failed to allocate DMA'able memory for Rx ring\n");
  848                 goto fail;
  849         }
  850 
  851         ctx.stge_busaddr = 0;
  852         error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
  853             sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
  854             STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
  855         if (error != 0 || ctx.stge_busaddr == 0) {
  856                 device_printf(sc->sc_dev,
  857                     "failed to load DMA'able memory for Rx ring\n");
  858                 goto fail;
  859         }
  860         sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;
  861 
  862         /* create DMA maps for Tx buffers. */
  863         for (i = 0; i < STGE_TX_RING_CNT; i++) {
  864                 txd = &sc->sc_cdata.stge_txdesc[i];
  865                 txd->tx_m = NULL;
  866                 txd->tx_dmamap = 0;
  867                 error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
  868                     &txd->tx_dmamap);
  869                 if (error != 0) {
  870                         device_printf(sc->sc_dev,
  871                             "failed to create Tx dmamap\n");
  872                         goto fail;
  873                 }
  874         }
  875         /* create DMA maps for Rx buffers. */
  876         if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
  877             &sc->sc_cdata.stge_rx_sparemap)) != 0) {
  878                 device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
  879                 goto fail;
  880         }
  881         for (i = 0; i < STGE_RX_RING_CNT; i++) {
  882                 rxd = &sc->sc_cdata.stge_rxdesc[i];
  883                 rxd->rx_m = NULL;
  884                 rxd->rx_dmamap = 0;
  885                 error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
  886                     &rxd->rx_dmamap);
  887                 if (error != 0) {
  888                         device_printf(sc->sc_dev,
  889                             "failed to create Rx dmamap\n");
  890                         goto fail;
  891                 }
  892         }
  893 
  894 fail:
  895         return (error);
  896 }
  897 
  898 static void
  899 stge_dma_free(struct stge_softc *sc)
  900 {
  901         struct stge_txdesc *txd;
  902         struct stge_rxdesc *rxd;
  903         int i;
  904 
  905         /* Tx ring */
  906         if (sc->sc_cdata.stge_tx_ring_tag) {
  907                 if (sc->sc_rdata.stge_tx_ring_paddr)
  908                         bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
  909                             sc->sc_cdata.stge_tx_ring_map);
  910                 if (sc->sc_rdata.stge_tx_ring)
  911                         bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
  912                             sc->sc_rdata.stge_tx_ring,
  913                             sc->sc_cdata.stge_tx_ring_map);
  914                 sc->sc_rdata.stge_tx_ring = NULL;
  915                 sc->sc_rdata.stge_tx_ring_paddr = 0;
  916                 bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
  917                 sc->sc_cdata.stge_tx_ring_tag = NULL;
  918         }
  919         /* Rx ring */
  920         if (sc->sc_cdata.stge_rx_ring_tag) {
  921                 if (sc->sc_rdata.stge_rx_ring_paddr)
  922                         bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
  923                             sc->sc_cdata.stge_rx_ring_map);
  924                 if (sc->sc_rdata.stge_rx_ring)
  925                         bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
  926                             sc->sc_rdata.stge_rx_ring,
  927                             sc->sc_cdata.stge_rx_ring_map);
  928                 sc->sc_rdata.stge_rx_ring = NULL;
  929                 sc->sc_rdata.stge_rx_ring_paddr = 0;
  930                 bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
  931                 sc->sc_cdata.stge_rx_ring_tag = NULL;
  932         }
  933         /* Tx buffers */
  934         if (sc->sc_cdata.stge_tx_tag) {
  935                 for (i = 0; i < STGE_TX_RING_CNT; i++) {
  936                         txd = &sc->sc_cdata.stge_txdesc[i];
  937                         if (txd->tx_dmamap) {
  938                                 bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
  939                                     txd->tx_dmamap);
  940                                 txd->tx_dmamap = 0;
  941                         }
  942                 }
  943                 bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
  944                 sc->sc_cdata.stge_tx_tag = NULL;
  945         }
  946         /* Rx buffers */
  947         if (sc->sc_cdata.stge_rx_tag) {
  948                 for (i = 0; i < STGE_RX_RING_CNT; i++) {
  949                         rxd = &sc->sc_cdata.stge_rxdesc[i];
  950                         if (rxd->rx_dmamap) {
  951                                 bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
  952                                     rxd->rx_dmamap);
  953                                 rxd->rx_dmamap = 0;
  954                         }
  955                 }
  956                 if (sc->sc_cdata.stge_rx_sparemap) {
  957                         bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
  958                             sc->sc_cdata.stge_rx_sparemap);
  959                         sc->sc_cdata.stge_rx_sparemap = 0;
  960                 }
  961                 bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
  962                 sc->sc_cdata.stge_rx_tag = NULL;
  963         }
  964 
  965         if (sc->sc_cdata.stge_parent_tag) {
  966                 bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
  967                 sc->sc_cdata.stge_parent_tag = NULL;
  968         }
  969 }
  970 
  971 /*
  972  * stge_shutdown:
  973  *
  974  *      Make sure the interface is stopped at reboot time.
  975  */
  976 static int
  977 stge_shutdown(device_t dev)
  978 {
  979 
  980         return (stge_suspend(dev));
  981 }
  982 
  983 static void
  984 stge_setwol(struct stge_softc *sc)
  985 {
  986         struct ifnet *ifp;
  987         uint8_t v;
  988 
  989         STGE_LOCK_ASSERT(sc);
  990 
  991         ifp = sc->sc_ifp;
  992         v = CSR_READ_1(sc, STGE_WakeEvent);
  993         /* Disable all WOL bits. */
  994         v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
  995             WE_WakeOnLanEnable);
  996         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
  997                 v |= WE_MagicPktEnable | WE_WakeOnLanEnable;
  998         CSR_WRITE_1(sc, STGE_WakeEvent, v);
  999         /* Reset Tx and prevent transmission. */
 1000         CSR_WRITE_4(sc, STGE_AsicCtrl,
 1001             CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
 1002         /*
 1003          * TC9021 automatically reset link speed to 100Mbps when it's put
 1004          * into sleep so there is no need to try to resetting link speed.
 1005          */
 1006 }
 1007 
 1008 static int
 1009 stge_suspend(device_t dev)
 1010 {
 1011         struct stge_softc *sc;
 1012 
 1013         sc = device_get_softc(dev);
 1014 
 1015         STGE_LOCK(sc);
 1016         stge_stop(sc);
 1017         sc->sc_suspended = 1;
 1018         stge_setwol(sc);
 1019         STGE_UNLOCK(sc);
 1020 
 1021         return (0);
 1022 }
 1023 
 1024 static int
 1025 stge_resume(device_t dev)
 1026 {
 1027         struct stge_softc *sc;
 1028         struct ifnet *ifp;
 1029         uint8_t v;
 1030 
 1031         sc = device_get_softc(dev);
 1032 
 1033         STGE_LOCK(sc);
 1034         /*
 1035          * Clear WOL bits, so special frames wouldn't interfere
 1036          * normal Rx operation anymore.
 1037          */
 1038         v = CSR_READ_1(sc, STGE_WakeEvent);
 1039         v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
 1040             WE_WakeOnLanEnable);
 1041         CSR_WRITE_1(sc, STGE_WakeEvent, v);
 1042         ifp = sc->sc_ifp;
 1043         if (ifp->if_flags & IFF_UP)
 1044                 stge_init_locked(sc);
 1045 
 1046         sc->sc_suspended = 0;
 1047         STGE_UNLOCK(sc);
 1048 
 1049         return (0);
 1050 }
 1051 
 1052 static void
 1053 stge_dma_wait(struct stge_softc *sc)
 1054 {
 1055         int i;
 1056 
 1057         for (i = 0; i < STGE_TIMEOUT; i++) {
 1058                 DELAY(2);
 1059                 if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
 1060                         break;
 1061         }
 1062 
 1063         if (i == STGE_TIMEOUT)
 1064                 device_printf(sc->sc_dev, "DMA wait timed out\n");
 1065 }
 1066 
 1067 static int
 1068 stge_encap(struct stge_softc *sc, struct mbuf **m_head)
 1069 {
 1070         struct stge_txdesc *txd;
 1071         struct stge_tfd *tfd;
 1072         struct mbuf *m;
 1073         bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
 1074         int error, i, nsegs, si;
 1075         uint64_t csum_flags, tfc;
 1076 
 1077         STGE_LOCK_ASSERT(sc);
 1078 
 1079         if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
 1080                 return (ENOBUFS);
 1081 
 1082         error =  bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
 1083             txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
 1084         if (error == EFBIG) {
 1085                 m = m_collapse(*m_head, M_NOWAIT, STGE_MAXTXSEGS);
 1086                 if (m == NULL) {
 1087                         m_freem(*m_head);
 1088                         *m_head = NULL;
 1089                         return (ENOMEM);
 1090                 }
 1091                 *m_head = m;
 1092                 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
 1093                     txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
 1094                 if (error != 0) {
 1095                         m_freem(*m_head);
 1096                         *m_head = NULL;
 1097                         return (error);
 1098                 }
 1099         } else if (error != 0)
 1100                 return (error);
 1101         if (nsegs == 0) {
 1102                 m_freem(*m_head);
 1103                 *m_head = NULL;
 1104                 return (EIO);
 1105         }
 1106 
 1107         m = *m_head;
 1108         csum_flags = 0;
 1109         if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
 1110                 if (m->m_pkthdr.csum_flags & CSUM_IP)
 1111                         csum_flags |= TFD_IPChecksumEnable;
 1112                 if (m->m_pkthdr.csum_flags & CSUM_TCP)
 1113                         csum_flags |= TFD_TCPChecksumEnable;
 1114                 else if (m->m_pkthdr.csum_flags & CSUM_UDP)
 1115                         csum_flags |= TFD_UDPChecksumEnable;
 1116         }
 1117 
 1118         si = sc->sc_cdata.stge_tx_prod;
 1119         tfd = &sc->sc_rdata.stge_tx_ring[si];
 1120         for (i = 0; i < nsegs; i++)
 1121                 tfd->tfd_frags[i].frag_word0 =
 1122                     htole64(FRAG_ADDR(txsegs[i].ds_addr) |
 1123                     FRAG_LEN(txsegs[i].ds_len));
 1124         sc->sc_cdata.stge_tx_cnt++;
 1125 
 1126         tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
 1127             TFD_FragCount(nsegs) | csum_flags;
 1128         if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
 1129                 tfc |= TFD_TxDMAIndicate;
 1130 
 1131         /* Update producer index. */
 1132         sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
 1133 
 1134         /* Check if we have a VLAN tag to insert. */
 1135         if (m->m_flags & M_VLANTAG)
 1136                 tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
 1137         tfd->tfd_control = htole64(tfc);
 1138 
 1139         /* Update Tx Queue. */
 1140         STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
 1141         STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
 1142         txd->tx_m = m;
 1143 
 1144         /* Sync descriptors. */
 1145         bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
 1146             BUS_DMASYNC_PREWRITE);
 1147         bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
 1148             sc->sc_cdata.stge_tx_ring_map,
 1149             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1150 
 1151         return (0);
 1152 }
 1153 
 1154 /*
 1155  * stge_start:          [ifnet interface function]
 1156  *
 1157  *      Start packet transmission on the interface.
 1158  */
 1159 static void
 1160 stge_start(struct ifnet *ifp)
 1161 {
 1162         struct stge_softc *sc;
 1163 
 1164         sc = ifp->if_softc;
 1165         STGE_LOCK(sc);
 1166         stge_start_locked(ifp);
 1167         STGE_UNLOCK(sc);
 1168 }
 1169 
 1170 static void
 1171 stge_start_locked(struct ifnet *ifp)
 1172 {
 1173         struct stge_softc *sc;
 1174         struct mbuf *m_head;
 1175         int enq;
 1176 
 1177         sc = ifp->if_softc;
 1178 
 1179         STGE_LOCK_ASSERT(sc);
 1180 
 1181         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
 1182             IFF_DRV_RUNNING || sc->sc_link == 0)
 1183                 return;
 1184 
 1185         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
 1186                 if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
 1187                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1188                         break;
 1189                 }
 1190 
 1191                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 1192                 if (m_head == NULL)
 1193                         break;
 1194                 /*
 1195                  * Pack the data into the transmit ring. If we
 1196                  * don't have room, set the OACTIVE flag and wait
 1197                  * for the NIC to drain the ring.
 1198                  */
 1199                 if (stge_encap(sc, &m_head)) {
 1200                         if (m_head == NULL)
 1201                                 break;
 1202                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 1203                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1204                         break;
 1205                 }
 1206 
 1207                 enq++;
 1208                 /*
 1209                  * If there's a BPF listener, bounce a copy of this frame
 1210                  * to him.
 1211                  */
 1212                 ETHER_BPF_MTAP(ifp, m_head);
 1213         }
 1214 
 1215         if (enq > 0) {
 1216                 /* Transmit */
 1217                 CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);
 1218 
 1219                 /* Set a timeout in case the chip goes out to lunch. */
 1220                 sc->sc_watchdog_timer = 5;
 1221         }
 1222 }
 1223 
 1224 /*
 1225  * stge_watchdog:
 1226  *
 1227  *      Watchdog timer handler.
 1228  */
 1229 static void
 1230 stge_watchdog(struct stge_softc *sc)
 1231 {
 1232         struct ifnet *ifp;
 1233 
 1234         STGE_LOCK_ASSERT(sc);
 1235 
 1236         if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
 1237                 return;
 1238 
 1239         ifp = sc->sc_ifp;
 1240         if_printf(sc->sc_ifp, "device timeout\n");
 1241         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 1242         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1243         stge_init_locked(sc);
 1244         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1245                 stge_start_locked(ifp);
 1246 }
 1247 
 1248 /*
 1249  * stge_ioctl:          [ifnet interface function]
 1250  *
 1251  *      Handle control requests from the operator.
 1252  */
 1253 static int
 1254 stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1255 {
 1256         struct stge_softc *sc;
 1257         struct ifreq *ifr;
 1258         struct mii_data *mii;
 1259         int error, mask;
 1260 
 1261         sc = ifp->if_softc;
 1262         ifr = (struct ifreq *)data;
 1263         error = 0;
 1264         switch (cmd) {
 1265         case SIOCSIFMTU:
 1266                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
 1267                         error = EINVAL;
 1268                 else if (ifp->if_mtu != ifr->ifr_mtu) {
 1269                         ifp->if_mtu = ifr->ifr_mtu;
 1270                         STGE_LOCK(sc);
 1271                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1272                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1273                                 stge_init_locked(sc);
 1274                         }
 1275                         STGE_UNLOCK(sc);
 1276                 }
 1277                 break;
 1278         case SIOCSIFFLAGS:
 1279                 STGE_LOCK(sc);
 1280                 if ((ifp->if_flags & IFF_UP) != 0) {
 1281                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1282                                 if (((ifp->if_flags ^ sc->sc_if_flags)
 1283                                     & IFF_PROMISC) != 0)
 1284                                         stge_set_filter(sc);
 1285                         } else {
 1286                                 if (sc->sc_detach == 0)
 1287                                         stge_init_locked(sc);
 1288                         }
 1289                 } else {
 1290                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1291                                 stge_stop(sc);
 1292                 }
 1293                 sc->sc_if_flags = ifp->if_flags;
 1294                 STGE_UNLOCK(sc);
 1295                 break;
 1296         case SIOCADDMULTI:
 1297         case SIOCDELMULTI:
 1298                 STGE_LOCK(sc);
 1299                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1300                         stge_set_multi(sc);
 1301                 STGE_UNLOCK(sc);
 1302                 break;
 1303         case SIOCSIFMEDIA:
 1304         case SIOCGIFMEDIA:
 1305                 mii = device_get_softc(sc->sc_miibus);
 1306                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1307                 break;
 1308         case SIOCSIFCAP:
 1309                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 1310 #ifdef DEVICE_POLLING
 1311                 if ((mask & IFCAP_POLLING) != 0) {
 1312                         if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
 1313                                 error = ether_poll_register(stge_poll, ifp);
 1314                                 if (error != 0)
 1315                                         break;
 1316                                 STGE_LOCK(sc);
 1317                                 CSR_WRITE_2(sc, STGE_IntEnable, 0);
 1318                                 ifp->if_capenable |= IFCAP_POLLING;
 1319                                 STGE_UNLOCK(sc);
 1320                         } else {
 1321                                 error = ether_poll_deregister(ifp);
 1322                                 if (error != 0)
 1323                                         break;
 1324                                 STGE_LOCK(sc);
 1325                                 CSR_WRITE_2(sc, STGE_IntEnable,
 1326                                     sc->sc_IntEnable);
 1327                                 ifp->if_capenable &= ~IFCAP_POLLING;
 1328                                 STGE_UNLOCK(sc);
 1329                         }
 1330                 }
 1331 #endif
 1332                 if ((mask & IFCAP_HWCSUM) != 0) {
 1333                         ifp->if_capenable ^= IFCAP_HWCSUM;
 1334                         if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
 1335                             (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
 1336                                 ifp->if_hwassist = STGE_CSUM_FEATURES;
 1337                         else
 1338                                 ifp->if_hwassist = 0;
 1339                 }
 1340                 if ((mask & IFCAP_WOL) != 0 &&
 1341                     (ifp->if_capabilities & IFCAP_WOL) != 0) {
 1342                         if ((mask & IFCAP_WOL_MAGIC) != 0)
 1343                                 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 1344                 }
 1345                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
 1346                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 1347                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1348                                 STGE_LOCK(sc);
 1349                                 stge_vlan_setup(sc);
 1350                                 STGE_UNLOCK(sc);
 1351                         }
 1352                 }
 1353                 VLAN_CAPABILITIES(ifp);
 1354                 break;
 1355         default:
 1356                 error = ether_ioctl(ifp, cmd, data);
 1357                 break;
 1358         }
 1359 
 1360         return (error);
 1361 }
 1362 
 1363 static void
 1364 stge_link_task(void *arg, int pending)
 1365 {
 1366         struct stge_softc *sc;
 1367         struct mii_data *mii;
 1368         uint32_t v, ac;
 1369         int i;
 1370 
 1371         sc = (struct stge_softc *)arg;
 1372         STGE_LOCK(sc);
 1373 
 1374         mii = device_get_softc(sc->sc_miibus);
 1375         if (mii->mii_media_status & IFM_ACTIVE) {
 1376                 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
 1377                         sc->sc_link = 1;
 1378         } else
 1379                 sc->sc_link = 0;
 1380 
 1381         sc->sc_MACCtrl = 0;
 1382         if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
 1383                 sc->sc_MACCtrl |= MC_DuplexSelect;
 1384         if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
 1385                 sc->sc_MACCtrl |= MC_RxFlowControlEnable;
 1386         if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
 1387                 sc->sc_MACCtrl |= MC_TxFlowControlEnable;
 1388         /*
 1389          * Update STGE_MACCtrl register depending on link status.
 1390          * (duplex, flow control etc)
 1391          */
 1392         v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 1393         v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
 1394         v |= sc->sc_MACCtrl;
 1395         CSR_WRITE_4(sc, STGE_MACCtrl, v);
 1396         if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
 1397                 /* Duplex setting changed, reset Tx/Rx functions. */
 1398                 ac = CSR_READ_4(sc, STGE_AsicCtrl);
 1399                 ac |= AC_TxReset | AC_RxReset;
 1400                 CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
 1401                 for (i = 0; i < STGE_TIMEOUT; i++) {
 1402                         DELAY(100);
 1403                         if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
 1404                                 break;
 1405                 }
 1406                 if (i == STGE_TIMEOUT)
 1407                         device_printf(sc->sc_dev, "reset failed to complete\n");
 1408         }
 1409         STGE_UNLOCK(sc);
 1410 }
 1411 
 1412 static __inline int
 1413 stge_tx_error(struct stge_softc *sc)
 1414 {
 1415         uint32_t txstat;
 1416         int error;
 1417 
 1418         for (error = 0;;) {
 1419                 txstat = CSR_READ_4(sc, STGE_TxStatus);
 1420                 if ((txstat & TS_TxComplete) == 0)
 1421                         break;
 1422                 /* Tx underrun */
 1423                 if ((txstat & TS_TxUnderrun) != 0) {
 1424                         /*
 1425                          * XXX
 1426                          * There should be a more better way to recover
 1427                          * from Tx underrun instead of a full reset.
 1428                          */
 1429                         if (sc->sc_nerr++ < STGE_MAXERR)
 1430                                 device_printf(sc->sc_dev, "Tx underrun, "
 1431                                     "resetting...\n");
 1432                         if (sc->sc_nerr == STGE_MAXERR)
 1433                                 device_printf(sc->sc_dev, "too many errors; "
 1434                                     "not reporting any more\n");
 1435                         error = -1;
 1436                         break;
 1437                 }
 1438                 /* Maximum/Late collisions, Re-enable Tx MAC. */
 1439                 if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
 1440                         CSR_WRITE_4(sc, STGE_MACCtrl,
 1441                             (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
 1442                             MC_TxEnable);
 1443         }
 1444 
 1445         return (error);
 1446 }
 1447 
 1448 /*
 1449  * stge_intr:
 1450  *
 1451  *      Interrupt service routine.
 1452  */
 1453 static void
 1454 stge_intr(void *arg)
 1455 {
 1456         struct stge_softc *sc;
 1457         struct ifnet *ifp;
 1458         int reinit;
 1459         uint16_t status;
 1460 
 1461         sc = (struct stge_softc *)arg;
 1462         ifp = sc->sc_ifp;
 1463 
 1464         STGE_LOCK(sc);
 1465 
 1466 #ifdef DEVICE_POLLING
 1467         if ((ifp->if_capenable & IFCAP_POLLING) != 0)
 1468                 goto done_locked;
 1469 #endif
 1470         status = CSR_READ_2(sc, STGE_IntStatus);
 1471         if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
 1472                 goto done_locked;
 1473 
 1474         /* Disable interrupts. */
 1475         for (reinit = 0;;) {
 1476                 status = CSR_READ_2(sc, STGE_IntStatusAck);
 1477                 status &= sc->sc_IntEnable;
 1478                 if (status == 0)
 1479                         break;
 1480                 /* Host interface errors. */
 1481                 if ((status & IS_HostError) != 0) {
 1482                         device_printf(sc->sc_dev,
 1483                             "Host interface error, resetting...\n");
 1484                         reinit = 1;
 1485                         goto force_init;
 1486                 }
 1487 
 1488                 /* Receive interrupts. */
 1489                 if ((status & IS_RxDMAComplete) != 0) {
 1490                         stge_rxeof(sc);
 1491                         if ((status & IS_RFDListEnd) != 0)
 1492                                 CSR_WRITE_4(sc, STGE_DMACtrl,
 1493                                     DMAC_RxDMAPollNow);
 1494                 }
 1495 
 1496                 /* Transmit interrupts. */
 1497                 if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
 1498                         stge_txeof(sc);
 1499 
 1500                 /* Transmission errors.*/
 1501                 if ((status & IS_TxComplete) != 0) {
 1502                         if ((reinit = stge_tx_error(sc)) != 0)
 1503                                 break;
 1504                 }
 1505         }
 1506 
 1507 force_init:
 1508         if (reinit != 0) {
 1509                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1510                 stge_init_locked(sc);
 1511         }
 1512 
 1513         /* Re-enable interrupts. */
 1514         CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
 1515 
 1516         /* Try to get more packets going. */
 1517         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1518                 stge_start_locked(ifp);
 1519 
 1520 done_locked:
 1521         STGE_UNLOCK(sc);
 1522 }
 1523 
 1524 /*
 1525  * stge_txeof:
 1526  *
 1527  *      Helper; handle transmit interrupts.
 1528  */
 1529 static void
 1530 stge_txeof(struct stge_softc *sc)
 1531 {
 1532         struct ifnet *ifp;
 1533         struct stge_txdesc *txd;
 1534         uint64_t control;
 1535         int cons;
 1536 
 1537         STGE_LOCK_ASSERT(sc);
 1538 
 1539         ifp = sc->sc_ifp;
 1540 
 1541         txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
 1542         if (txd == NULL)
 1543                 return;
 1544         bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
 1545             sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);
 1546 
 1547         /*
 1548          * Go through our Tx list and free mbufs for those
 1549          * frames which have been transmitted.
 1550          */
 1551         for (cons = sc->sc_cdata.stge_tx_cons;;
 1552             cons = (cons + 1) % STGE_TX_RING_CNT) {
 1553                 if (sc->sc_cdata.stge_tx_cnt <= 0)
 1554                         break;
 1555                 control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
 1556                 if ((control & TFD_TFDDone) == 0)
 1557                         break;
 1558                 sc->sc_cdata.stge_tx_cnt--;
 1559                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1560 
 1561                 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
 1562                     BUS_DMASYNC_POSTWRITE);
 1563                 bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);
 1564 
 1565                 /* Output counter is updated with statistics register */
 1566                 m_freem(txd->tx_m);
 1567                 txd->tx_m = NULL;
 1568                 STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
 1569                 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
 1570                 txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
 1571         }
 1572         sc->sc_cdata.stge_tx_cons = cons;
 1573         if (sc->sc_cdata.stge_tx_cnt == 0)
 1574                 sc->sc_watchdog_timer = 0;
 1575 
 1576         bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
 1577             sc->sc_cdata.stge_tx_ring_map,
 1578             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1579 }
 1580 
 1581 static __inline void
 1582 stge_discard_rxbuf(struct stge_softc *sc, int idx)
 1583 {
 1584         struct stge_rfd *rfd;
 1585 
 1586         rfd = &sc->sc_rdata.stge_rx_ring[idx];
 1587         rfd->rfd_status = 0;
 1588 }
 1589 
 1590 #ifndef __NO_STRICT_ALIGNMENT
 1591 /*
 1592  * It seems that TC9021's DMA engine has alignment restrictions in
 1593  * DMA scatter operations. The first DMA segment has no address
 1594  * alignment restrictins but the rest should be aligned on 4(?) bytes
 1595  * boundary. Otherwise it would corrupt random memory. Since we don't
 1596  * know which one is used for the first segment in advance we simply
 1597  * don't align at all.
 1598  * To avoid copying over an entire frame to align, we allocate a new
 1599  * mbuf and copy ethernet header to the new mbuf. The new mbuf is
 1600  * prepended into the existing mbuf chain.
 1601  */
 1602 static __inline struct mbuf *
 1603 stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
 1604 {
 1605         struct mbuf *n;
 1606 
 1607         n = NULL;
 1608         if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
 1609                 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
 1610                 m->m_data += ETHER_HDR_LEN;
 1611                 n = m;
 1612         } else {
 1613                 MGETHDR(n, M_NOWAIT, MT_DATA);
 1614                 if (n != NULL) {
 1615                         bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
 1616                         m->m_data += ETHER_HDR_LEN;
 1617                         m->m_len -= ETHER_HDR_LEN;
 1618                         n->m_len = ETHER_HDR_LEN;
 1619                         M_MOVE_PKTHDR(n, m);
 1620                         n->m_next = m;
 1621                 } else
 1622                         m_freem(m);
 1623         }
 1624 
 1625         return (n);
 1626 }
 1627 #endif
 1628 
 1629 /*
 1630  * stge_rxeof:
 1631  *
 1632  *      Helper; handle receive interrupts.
 1633  */
 1634 static int
 1635 stge_rxeof(struct stge_softc *sc)
 1636 {
 1637         struct ifnet *ifp;
 1638         struct stge_rxdesc *rxd;
 1639         struct mbuf *mp, *m;
 1640         uint64_t status64;
 1641         uint32_t status;
 1642         int cons, prog, rx_npkts;
 1643 
 1644         STGE_LOCK_ASSERT(sc);
 1645 
 1646         rx_npkts = 0;
 1647         ifp = sc->sc_ifp;
 1648 
 1649         bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
 1650             sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);
 1651 
 1652         prog = 0;
 1653         for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
 1654             prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
 1655                 status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
 1656                 status = RFD_RxStatus(status64);
 1657                 if ((status & RFD_RFDDone) == 0)
 1658                         break;
 1659 #ifdef DEVICE_POLLING
 1660                 if (ifp->if_capenable & IFCAP_POLLING) {
 1661                         if (sc->sc_cdata.stge_rxcycles <= 0)
 1662                                 break;
 1663                         sc->sc_cdata.stge_rxcycles--;
 1664                 }
 1665 #endif
 1666                 prog++;
 1667                 rxd = &sc->sc_cdata.stge_rxdesc[cons];
 1668                 mp = rxd->rx_m;
 1669 
 1670                 /*
 1671                  * If the packet had an error, drop it.  Note we count
 1672                  * the error later in the periodic stats update.
 1673                  */
 1674                 if ((status & RFD_FrameEnd) != 0 && (status &
 1675                     (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
 1676                     RFD_RxAlignmentError | RFD_RxFCSError |
 1677                     RFD_RxLengthError)) != 0) {
 1678                         stge_discard_rxbuf(sc, cons);
 1679                         if (sc->sc_cdata.stge_rxhead != NULL) {
 1680                                 m_freem(sc->sc_cdata.stge_rxhead);
 1681                                 STGE_RXCHAIN_RESET(sc);
 1682                         }
 1683                         continue;
 1684                 }
 1685                 /*
 1686                  * Add a new receive buffer to the ring.
 1687                  */
 1688                 if (stge_newbuf(sc, cons) != 0) {
 1689                         if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 1690                         stge_discard_rxbuf(sc, cons);
 1691                         if (sc->sc_cdata.stge_rxhead != NULL) {
 1692                                 m_freem(sc->sc_cdata.stge_rxhead);
 1693                                 STGE_RXCHAIN_RESET(sc);
 1694                         }
 1695                         continue;
 1696                 }
 1697 
 1698                 if ((status & RFD_FrameEnd) != 0)
 1699                         mp->m_len = RFD_RxDMAFrameLen(status) -
 1700                             sc->sc_cdata.stge_rxlen;
 1701                 sc->sc_cdata.stge_rxlen += mp->m_len;
 1702 
 1703                 /* Chain mbufs. */
 1704                 if (sc->sc_cdata.stge_rxhead == NULL) {
 1705                         sc->sc_cdata.stge_rxhead = mp;
 1706                         sc->sc_cdata.stge_rxtail = mp;
 1707                 } else {
 1708                         mp->m_flags &= ~M_PKTHDR;
 1709                         sc->sc_cdata.stge_rxtail->m_next = mp;
 1710                         sc->sc_cdata.stge_rxtail = mp;
 1711                 }
 1712 
 1713                 if ((status & RFD_FrameEnd) != 0) {
 1714                         m = sc->sc_cdata.stge_rxhead;
 1715                         m->m_pkthdr.rcvif = ifp;
 1716                         m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;
 1717 
 1718                         if (m->m_pkthdr.len > sc->sc_if_framesize) {
 1719                                 m_freem(m);
 1720                                 STGE_RXCHAIN_RESET(sc);
 1721                                 continue;
 1722                         }
 1723                         /*
 1724                          * Set the incoming checksum information for
 1725                          * the packet.
 1726                          */
 1727                         if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
 1728                                 if ((status & RFD_IPDetected) != 0) {
 1729                                         m->m_pkthdr.csum_flags |=
 1730                                                 CSUM_IP_CHECKED;
 1731                                         if ((status & RFD_IPError) == 0)
 1732                                                 m->m_pkthdr.csum_flags |=
 1733                                                     CSUM_IP_VALID;
 1734                                 }
 1735                                 if (((status & RFD_TCPDetected) != 0 &&
 1736                                     (status & RFD_TCPError) == 0) ||
 1737                                     ((status & RFD_UDPDetected) != 0 &&
 1738                                     (status & RFD_UDPError) == 0)) {
 1739                                         m->m_pkthdr.csum_flags |=
 1740                                             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 1741                                         m->m_pkthdr.csum_data = 0xffff;
 1742                                 }
 1743                         }
 1744 
 1745 #ifndef __NO_STRICT_ALIGNMENT
 1746                         if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
 1747                                 if ((m = stge_fixup_rx(sc, m)) == NULL) {
 1748                                         STGE_RXCHAIN_RESET(sc);
 1749                                         continue;
 1750                                 }
 1751                         }
 1752 #endif
 1753                         /* Check for VLAN tagged packets. */
 1754                         if ((status & RFD_VLANDetected) != 0 &&
 1755                             (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 1756                                 m->m_pkthdr.ether_vtag = RFD_TCI(status64);
 1757                                 m->m_flags |= M_VLANTAG;
 1758                         }
 1759 
 1760                         STGE_UNLOCK(sc);
 1761                         /* Pass it on. */
 1762                         (*ifp->if_input)(ifp, m);
 1763                         STGE_LOCK(sc);
 1764                         rx_npkts++;
 1765 
 1766                         STGE_RXCHAIN_RESET(sc);
 1767                 }
 1768         }
 1769 
 1770         if (prog > 0) {
 1771                 /* Update the consumer index. */
 1772                 sc->sc_cdata.stge_rx_cons = cons;
 1773                 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
 1774                     sc->sc_cdata.stge_rx_ring_map,
 1775                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1776         }
 1777         return (rx_npkts);
 1778 }
 1779 
 1780 #ifdef DEVICE_POLLING
 1781 static int
 1782 stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 1783 {
 1784         struct stge_softc *sc;
 1785         uint16_t status;
 1786         int rx_npkts;
 1787 
 1788         rx_npkts = 0;
 1789         sc = ifp->if_softc;
 1790         STGE_LOCK(sc);
 1791         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
 1792                 STGE_UNLOCK(sc);
 1793                 return (rx_npkts);
 1794         }
 1795 
 1796         sc->sc_cdata.stge_rxcycles = count;
 1797         rx_npkts = stge_rxeof(sc);
 1798         stge_txeof(sc);
 1799 
 1800         if (cmd == POLL_AND_CHECK_STATUS) {
 1801                 status = CSR_READ_2(sc, STGE_IntStatus);
 1802                 status &= sc->sc_IntEnable;
 1803                 if (status != 0) {
 1804                         if ((status & IS_HostError) != 0) {
 1805                                 device_printf(sc->sc_dev,
 1806                                     "Host interface error, resetting...\n");
 1807                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1808                                 stge_init_locked(sc);
 1809                         }
 1810                         if ((status & IS_TxComplete) != 0) {
 1811                                 if (stge_tx_error(sc) != 0) {
 1812                                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1813                                         stge_init_locked(sc);
 1814                                 }
 1815                         }
 1816                 }
 1817         }
 1818 
 1819         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 1820                 stge_start_locked(ifp);
 1821 
 1822         STGE_UNLOCK(sc);
 1823         return (rx_npkts);
 1824 }
 1825 #endif  /* DEVICE_POLLING */
 1826 
 1827 /*
 1828  * stge_tick:
 1829  *
 1830  *      One second timer, used to tick the MII.
 1831  */
 1832 static void
 1833 stge_tick(void *arg)
 1834 {
 1835         struct stge_softc *sc;
 1836         struct mii_data *mii;
 1837 
 1838         sc = (struct stge_softc *)arg;
 1839 
 1840         STGE_LOCK_ASSERT(sc);
 1841 
 1842         mii = device_get_softc(sc->sc_miibus);
 1843         mii_tick(mii);
 1844 
 1845         /* Update statistics counters. */
 1846         stge_stats_update(sc);
 1847 
 1848         /*
 1849          * Relcaim any pending Tx descriptors to release mbufs in a
 1850          * timely manner as we don't generate Tx completion interrupts
 1851          * for every frame. This limits the delay to a maximum of one
 1852          * second.
 1853          */
 1854         if (sc->sc_cdata.stge_tx_cnt != 0)
 1855                 stge_txeof(sc);
 1856 
 1857         stge_watchdog(sc);
 1858 
 1859         callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
 1860 }
 1861 
 1862 /*
 1863  * stge_stats_update:
 1864  *
 1865  *      Read the TC9021 statistics counters.
 1866  */
 1867 static void
 1868 stge_stats_update(struct stge_softc *sc)
 1869 {
 1870         struct ifnet *ifp;
 1871 
 1872         STGE_LOCK_ASSERT(sc);
 1873 
 1874         ifp = sc->sc_ifp;
 1875 
 1876         CSR_READ_4(sc,STGE_OctetRcvOk);
 1877 
 1878         if_inc_counter(ifp, IFCOUNTER_IPACKETS, CSR_READ_4(sc, STGE_FramesRcvdOk));
 1879 
 1880         if_inc_counter(ifp, IFCOUNTER_IERRORS, CSR_READ_2(sc, STGE_FramesLostRxErrors));
 1881 
 1882         CSR_READ_4(sc, STGE_OctetXmtdOk);
 1883 
 1884         if_inc_counter(ifp, IFCOUNTER_OPACKETS, CSR_READ_4(sc, STGE_FramesXmtdOk));
 1885 
 1886         if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
 1887             CSR_READ_4(sc, STGE_LateCollisions) +
 1888             CSR_READ_4(sc, STGE_MultiColFrames) +
 1889             CSR_READ_4(sc, STGE_SingleColFrames));
 1890 
 1891         if_inc_counter(ifp, IFCOUNTER_OERRORS,
 1892             CSR_READ_2(sc, STGE_FramesAbortXSColls) +
 1893             CSR_READ_2(sc, STGE_FramesWEXDeferal));
 1894 }
 1895 
 1896 /*
 1897  * stge_reset:
 1898  *
 1899  *      Perform a soft reset on the TC9021.
 1900  */
 1901 static void
 1902 stge_reset(struct stge_softc *sc, uint32_t how)
 1903 {
 1904         uint32_t ac;
 1905         uint8_t v;
 1906         int i, dv;
 1907 
 1908         STGE_LOCK_ASSERT(sc);
 1909 
 1910         dv = 5000;
 1911         ac = CSR_READ_4(sc, STGE_AsicCtrl);
 1912         switch (how) {
 1913         case STGE_RESET_TX:
 1914                 ac |= AC_TxReset | AC_FIFO;
 1915                 dv = 100;
 1916                 break;
 1917         case STGE_RESET_RX:
 1918                 ac |= AC_RxReset | AC_FIFO;
 1919                 dv = 100;
 1920                 break;
 1921         case STGE_RESET_FULL:
 1922         default:
 1923                 /*
 1924                  * Only assert RstOut if we're fiber.  We need GMII clocks
 1925                  * to be present in order for the reset to complete on fiber
 1926                  * cards.
 1927                  */
 1928                 ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
 1929                     AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
 1930                     (sc->sc_usefiber ? AC_RstOut : 0);
 1931                 break;
 1932         }
 1933 
 1934         CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
 1935 
 1936         /* Account for reset problem at 10Mbps. */
 1937         DELAY(dv);
 1938 
 1939         for (i = 0; i < STGE_TIMEOUT; i++) {
 1940                 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
 1941                         break;
 1942                 DELAY(dv);
 1943         }
 1944 
 1945         if (i == STGE_TIMEOUT)
 1946                 device_printf(sc->sc_dev, "reset failed to complete\n");
 1947 
 1948         /* Set LED, from Linux IPG driver. */
 1949         ac = CSR_READ_4(sc, STGE_AsicCtrl);
 1950         ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
 1951         if ((sc->sc_led & 0x01) != 0)
 1952                 ac |= AC_LEDMode;
 1953         if ((sc->sc_led & 0x03) != 0)
 1954                 ac |= AC_LEDModeBit1;
 1955         if ((sc->sc_led & 0x08) != 0)
 1956                 ac |= AC_LEDSpeed;
 1957         CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
 1958 
 1959         /* Set PHY, from Linux IPG driver */
 1960         v = CSR_READ_1(sc, STGE_PhySet);
 1961         v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
 1962         v |= ((sc->sc_led & 0x70) >> 4);
 1963         CSR_WRITE_1(sc, STGE_PhySet, v);
 1964 }
 1965 
 1966 /*
 1967  * stge_init:           [ ifnet interface function ]
 1968  *
 1969  *      Initialize the interface.
 1970  */
 1971 static void
 1972 stge_init(void *xsc)
 1973 {
 1974         struct stge_softc *sc;
 1975 
 1976         sc = (struct stge_softc *)xsc;
 1977         STGE_LOCK(sc);
 1978         stge_init_locked(sc);
 1979         STGE_UNLOCK(sc);
 1980 }
 1981 
 1982 static void
 1983 stge_init_locked(struct stge_softc *sc)
 1984 {
 1985         struct ifnet *ifp;
 1986         struct mii_data *mii;
 1987         uint16_t eaddr[3];
 1988         uint32_t v;
 1989         int error;
 1990 
 1991         STGE_LOCK_ASSERT(sc);
 1992 
 1993         ifp = sc->sc_ifp;
 1994         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1995                 return;
 1996         mii = device_get_softc(sc->sc_miibus);
 1997 
 1998         /*
 1999          * Cancel any pending I/O.
 2000          */
 2001         stge_stop(sc);
 2002 
 2003         /*
 2004          * Reset the chip to a known state.
 2005          */
 2006         stge_reset(sc, STGE_RESET_FULL);
 2007 
 2008         /* Init descriptors. */
 2009         error = stge_init_rx_ring(sc);
 2010         if (error != 0) {
 2011                 device_printf(sc->sc_dev,
 2012                     "initialization failed: no memory for rx buffers\n");
 2013                 stge_stop(sc);
 2014                 goto out;
 2015         }
 2016         stge_init_tx_ring(sc);
 2017 
 2018         /* Set the station address. */
 2019         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
 2020         CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
 2021         CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
 2022         CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
 2023 
 2024         /*
 2025          * Set the statistics masks.  Disable all the RMON stats,
 2026          * and disable selected stats in the non-RMON stats registers.
 2027          */
 2028         CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
 2029         CSR_WRITE_4(sc, STGE_StatisticsMask,
 2030             (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
 2031             (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
 2032             (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
 2033             (1U << 21));
 2034 
 2035         /* Set up the receive filter. */
 2036         stge_set_filter(sc);
 2037         /* Program multicast filter. */
 2038         stge_set_multi(sc);
 2039 
 2040         /*
 2041          * Give the transmit and receive ring to the chip.
 2042          */
 2043         CSR_WRITE_4(sc, STGE_TFDListPtrHi,
 2044             STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
 2045         CSR_WRITE_4(sc, STGE_TFDListPtrLo,
 2046             STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
 2047 
 2048         CSR_WRITE_4(sc, STGE_RFDListPtrHi,
 2049             STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
 2050         CSR_WRITE_4(sc, STGE_RFDListPtrLo,
 2051             STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
 2052 
 2053         /*
 2054          * Initialize the Tx auto-poll period.  It's OK to make this number
 2055          * large (255 is the max, but we use 127) -- we explicitly kick the
 2056          * transmit engine when there's actually a packet.
 2057          */
 2058         CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
 2059 
 2060         /* ..and the Rx auto-poll period. */
 2061         CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
 2062 
 2063         /* Initialize the Tx start threshold. */
 2064         CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
 2065 
 2066         /* Rx DMA thresholds, from Linux */
 2067         CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
 2068         CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
 2069 
 2070         /* Rx early threhold, from Linux */
 2071         CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
 2072 
 2073         /* Tx DMA thresholds, from Linux */
 2074         CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
 2075         CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
 2076 
 2077         /*
 2078          * Initialize the Rx DMA interrupt control register.  We
 2079          * request an interrupt after every incoming packet, but
 2080          * defer it for sc_rxint_dmawait us. When the number of
 2081          * interrupts pending reaches STGE_RXINT_NFRAME, we stop
 2082          * deferring the interrupt, and signal it immediately.
 2083          */
 2084         CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
 2085             RDIC_RxFrameCount(sc->sc_rxint_nframe) |
 2086             RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
 2087 
 2088         /*
 2089          * Initialize the interrupt mask.
 2090          */
 2091         sc->sc_IntEnable = IS_HostError | IS_TxComplete |
 2092             IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
 2093 #ifdef DEVICE_POLLING
 2094         /* Disable interrupts if we are polling. */
 2095         if ((ifp->if_capenable & IFCAP_POLLING) != 0)
 2096                 CSR_WRITE_2(sc, STGE_IntEnable, 0);
 2097         else
 2098 #endif
 2099         CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
 2100 
 2101         /*
 2102          * Configure the DMA engine.
 2103          * XXX Should auto-tune TxBurstLimit.
 2104          */
 2105         CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
 2106 
 2107         /*
 2108          * Send a PAUSE frame when we reach 29,696 bytes in the Rx
 2109          * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
 2110          * in the Rx FIFO.
 2111          */
 2112         CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
 2113         CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
 2114 
 2115         /*
 2116          * Set the maximum frame size.
 2117          */
 2118         sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
 2119         CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
 2120 
 2121         /*
 2122          * Initialize MacCtrl -- do it before setting the media,
 2123          * as setting the media will actually program the register.
 2124          *
 2125          * Note: We have to poke the IFS value before poking
 2126          * anything else.
 2127          */
 2128         /* Tx/Rx MAC should be disabled before programming IFS.*/
 2129         CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
 2130 
 2131         stge_vlan_setup(sc);
 2132 
 2133         if (sc->sc_rev >= 6) {          /* >= B.2 */
 2134                 /* Multi-frag frame bug work-around. */
 2135                 CSR_WRITE_2(sc, STGE_DebugCtrl,
 2136                     CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
 2137 
 2138                 /* Tx Poll Now bug work-around. */
 2139                 CSR_WRITE_2(sc, STGE_DebugCtrl,
 2140                     CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
 2141                 /* Tx Poll Now bug work-around. */
 2142                 CSR_WRITE_2(sc, STGE_DebugCtrl,
 2143                     CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
 2144         }
 2145 
 2146         v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2147         v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
 2148         CSR_WRITE_4(sc, STGE_MACCtrl, v);
 2149         /*
 2150          * It seems that transmitting frames without checking the state of
 2151          * Rx/Tx MAC wedge the hardware.
 2152          */
 2153         stge_start_tx(sc);
 2154         stge_start_rx(sc);
 2155 
 2156         sc->sc_link = 0;
 2157         /*
 2158          * Set the current media.
 2159          */
 2160         mii_mediachg(mii);
 2161 
 2162         /*
 2163          * Start the one second MII clock.
 2164          */
 2165         callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
 2166 
 2167         /*
 2168          * ...all done!
 2169          */
 2170         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2171         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2172 
 2173  out:
 2174         if (error != 0)
 2175                 device_printf(sc->sc_dev, "interface not running\n");
 2176 }
 2177 
 2178 static void
 2179 stge_vlan_setup(struct stge_softc *sc)
 2180 {
 2181         struct ifnet *ifp;
 2182         uint32_t v;
 2183 
 2184         ifp = sc->sc_ifp;
 2185         /*
 2186          * The NIC always copy a VLAN tag regardless of STGE_MACCtrl
 2187          * MC_AutoVLANuntagging bit.
 2188          * MC_AutoVLANtagging bit selects which VLAN source to use
 2189          * between STGE_VLANTag and TFC. However TFC TFD_VLANTagInsert
 2190          * bit has priority over MC_AutoVLANtagging bit. So we always
 2191          * use TFC instead of STGE_VLANTag register.
 2192          */
 2193         v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2194         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 2195                 v |= MC_AutoVLANuntagging;
 2196         else
 2197                 v &= ~MC_AutoVLANuntagging;
 2198         CSR_WRITE_4(sc, STGE_MACCtrl, v);
 2199 }
 2200 
 2201 /*
 2202  *      Stop transmission on the interface.
 2203  */
 2204 static void
 2205 stge_stop(struct stge_softc *sc)
 2206 {
 2207         struct ifnet *ifp;
 2208         struct stge_txdesc *txd;
 2209         struct stge_rxdesc *rxd;
 2210         uint32_t v;
 2211         int i;
 2212 
 2213         STGE_LOCK_ASSERT(sc);
 2214         /*
 2215          * Stop the one second clock.
 2216          */
 2217         callout_stop(&sc->sc_tick_ch);
 2218         sc->sc_watchdog_timer = 0;
 2219 
 2220         /*
 2221          * Disable interrupts.
 2222          */
 2223         CSR_WRITE_2(sc, STGE_IntEnable, 0);
 2224 
 2225         /*
 2226          * Stop receiver, transmitter, and stats update.
 2227          */
 2228         stge_stop_rx(sc);
 2229         stge_stop_tx(sc);
 2230         v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2231         v |= MC_StatisticsDisable;
 2232         CSR_WRITE_4(sc, STGE_MACCtrl, v);
 2233 
 2234         /*
 2235          * Stop the transmit and receive DMA.
 2236          */
 2237         stge_dma_wait(sc);
 2238         CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
 2239         CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
 2240         CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
 2241         CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
 2242 
 2243         /*
 2244          * Free RX and TX mbufs still in the queues.
 2245          */
 2246         for (i = 0; i < STGE_RX_RING_CNT; i++) {
 2247                 rxd = &sc->sc_cdata.stge_rxdesc[i];
 2248                 if (rxd->rx_m != NULL) {
 2249                         bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
 2250                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 2251                         bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
 2252                             rxd->rx_dmamap);
 2253                         m_freem(rxd->rx_m);
 2254                         rxd->rx_m = NULL;
 2255                 }
 2256         }
 2257         for (i = 0; i < STGE_TX_RING_CNT; i++) {
 2258                 txd = &sc->sc_cdata.stge_txdesc[i];
 2259                 if (txd->tx_m != NULL) {
 2260                         bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
 2261                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 2262                         bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
 2263                             txd->tx_dmamap);
 2264                         m_freem(txd->tx_m);
 2265                         txd->tx_m = NULL;
 2266                 }
 2267         }
 2268 
 2269         /*
 2270          * Mark the interface down and cancel the watchdog timer.
 2271          */
 2272         ifp = sc->sc_ifp;
 2273         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 2274         sc->sc_link = 0;
 2275 }
 2276 
 2277 static void
 2278 stge_start_tx(struct stge_softc *sc)
 2279 {
 2280         uint32_t v;
 2281         int i;
 2282 
 2283         v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2284         if ((v & MC_TxEnabled) != 0)
 2285                 return;
 2286         v |= MC_TxEnable;
 2287         CSR_WRITE_4(sc, STGE_MACCtrl, v);
 2288         CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
 2289         for (i = STGE_TIMEOUT; i > 0; i--) {
 2290                 DELAY(10);
 2291                 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2292                 if ((v & MC_TxEnabled) != 0)
 2293                         break;
 2294         }
 2295         if (i == 0)
 2296                 device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
 2297 }
 2298 
 2299 static void
 2300 stge_start_rx(struct stge_softc *sc)
 2301 {
 2302         uint32_t v;
 2303         int i;
 2304 
 2305         v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2306         if ((v & MC_RxEnabled) != 0)
 2307                 return;
 2308         v |= MC_RxEnable;
 2309         CSR_WRITE_4(sc, STGE_MACCtrl, v);
 2310         CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
 2311         for (i = STGE_TIMEOUT; i > 0; i--) {
 2312                 DELAY(10);
 2313                 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2314                 if ((v & MC_RxEnabled) != 0)
 2315                         break;
 2316         }
 2317         if (i == 0)
 2318                 device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
 2319 }
 2320 
 2321 static void
 2322 stge_stop_tx(struct stge_softc *sc)
 2323 {
 2324         uint32_t v;
 2325         int i;
 2326 
 2327         v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2328         if ((v & MC_TxEnabled) == 0)
 2329                 return;
 2330         v |= MC_TxDisable;
 2331         CSR_WRITE_4(sc, STGE_MACCtrl, v);
 2332         for (i = STGE_TIMEOUT; i > 0; i--) {
 2333                 DELAY(10);
 2334                 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2335                 if ((v & MC_TxEnabled) == 0)
 2336                         break;
 2337         }
 2338         if (i == 0)
 2339                 device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
 2340 }
 2341 
 2342 static void
 2343 stge_stop_rx(struct stge_softc *sc)
 2344 {
 2345         uint32_t v;
 2346         int i;
 2347 
 2348         v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2349         if ((v & MC_RxEnabled) == 0)
 2350                 return;
 2351         v |= MC_RxDisable;
 2352         CSR_WRITE_4(sc, STGE_MACCtrl, v);
 2353         for (i = STGE_TIMEOUT; i > 0; i--) {
 2354                 DELAY(10);
 2355                 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
 2356                 if ((v & MC_RxEnabled) == 0)
 2357                         break;
 2358         }
 2359         if (i == 0)
 2360                 device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
 2361 }
 2362 
 2363 static void
 2364 stge_init_tx_ring(struct stge_softc *sc)
 2365 {
 2366         struct stge_ring_data *rd;
 2367         struct stge_txdesc *txd;
 2368         bus_addr_t addr;
 2369         int i;
 2370 
 2371         STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
 2372         STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
 2373 
 2374         sc->sc_cdata.stge_tx_prod = 0;
 2375         sc->sc_cdata.stge_tx_cons = 0;
 2376         sc->sc_cdata.stge_tx_cnt = 0;
 2377 
 2378         rd = &sc->sc_rdata;
 2379         bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
 2380         for (i = 0; i < STGE_TX_RING_CNT; i++) {
 2381                 if (i == (STGE_TX_RING_CNT - 1))
 2382                         addr = STGE_TX_RING_ADDR(sc, 0);
 2383                 else
 2384                         addr = STGE_TX_RING_ADDR(sc, i + 1);
 2385                 rd->stge_tx_ring[i].tfd_next = htole64(addr);
 2386                 rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
 2387                 txd = &sc->sc_cdata.stge_txdesc[i];
 2388                 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
 2389         }
 2390 
 2391         bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
 2392             sc->sc_cdata.stge_tx_ring_map,
 2393             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2394 
 2395 }
 2396 
 2397 static int
 2398 stge_init_rx_ring(struct stge_softc *sc)
 2399 {
 2400         struct stge_ring_data *rd;
 2401         bus_addr_t addr;
 2402         int i;
 2403 
 2404         sc->sc_cdata.stge_rx_cons = 0;
 2405         STGE_RXCHAIN_RESET(sc);
 2406 
 2407         rd = &sc->sc_rdata;
 2408         bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
 2409         for (i = 0; i < STGE_RX_RING_CNT; i++) {
 2410                 if (stge_newbuf(sc, i) != 0)
 2411                         return (ENOBUFS);
 2412                 if (i == (STGE_RX_RING_CNT - 1))
 2413                         addr = STGE_RX_RING_ADDR(sc, 0);
 2414                 else
 2415                         addr = STGE_RX_RING_ADDR(sc, i + 1);
 2416                 rd->stge_rx_ring[i].rfd_next = htole64(addr);
 2417                 rd->stge_rx_ring[i].rfd_status = 0;
 2418         }
 2419 
 2420         bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
 2421             sc->sc_cdata.stge_rx_ring_map,
 2422             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2423 
 2424         return (0);
 2425 }
 2426 
 2427 /*
 2428  * stge_newbuf:
 2429  *
 2430  *      Add a receive buffer to the indicated descriptor.
 2431  */
 2432 static int
 2433 stge_newbuf(struct stge_softc *sc, int idx)
 2434 {
 2435         struct stge_rxdesc *rxd;
 2436         struct stge_rfd *rfd;
 2437         struct mbuf *m;
 2438         bus_dma_segment_t segs[1];
 2439         bus_dmamap_t map;
 2440         int nsegs;
 2441 
 2442         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 2443         if (m == NULL)
 2444                 return (ENOBUFS);
 2445         m->m_len = m->m_pkthdr.len = MCLBYTES;
 2446         /*
 2447          * The hardware requires 4bytes aligned DMA address when JUMBO
 2448          * frame is used.
 2449          */
 2450         if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
 2451                 m_adj(m, ETHER_ALIGN);
 2452 
 2453         if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
 2454             sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
 2455                 m_freem(m);
 2456                 return (ENOBUFS);
 2457         }
 2458         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 2459 
 2460         rxd = &sc->sc_cdata.stge_rxdesc[idx];
 2461         if (rxd->rx_m != NULL) {
 2462                 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
 2463                     BUS_DMASYNC_POSTREAD);
 2464                 bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
 2465         }
 2466         map = rxd->rx_dmamap;
 2467         rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
 2468         sc->sc_cdata.stge_rx_sparemap = map;
 2469         bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
 2470             BUS_DMASYNC_PREREAD);
 2471         rxd->rx_m = m;
 2472 
 2473         rfd = &sc->sc_rdata.stge_rx_ring[idx];
 2474         rfd->rfd_frag.frag_word0 =
 2475             htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
 2476         rfd->rfd_status = 0;
 2477 
 2478         return (0);
 2479 }
 2480 
 2481 /*
 2482  * stge_set_filter:
 2483  *
 2484  *      Set up the receive filter.
 2485  */
 2486 static void
 2487 stge_set_filter(struct stge_softc *sc)
 2488 {
 2489         struct ifnet *ifp;
 2490         uint16_t mode;
 2491 
 2492         STGE_LOCK_ASSERT(sc);
 2493 
 2494         ifp = sc->sc_ifp;
 2495 
 2496         mode = CSR_READ_2(sc, STGE_ReceiveMode);
 2497         mode |= RM_ReceiveUnicast;
 2498         if ((ifp->if_flags & IFF_BROADCAST) != 0)
 2499                 mode |= RM_ReceiveBroadcast;
 2500         else
 2501                 mode &= ~RM_ReceiveBroadcast;
 2502         if ((ifp->if_flags & IFF_PROMISC) != 0)
 2503                 mode |= RM_ReceiveAllFrames;
 2504         else
 2505                 mode &= ~RM_ReceiveAllFrames;
 2506 
 2507         CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
 2508 }
 2509 
 2510 static u_int
 2511 stge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
 2512 {
 2513         uint32_t crc, *mchash = arg;
 2514 
 2515         crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
 2516         /* Just want the 6 least significant bits. */
 2517         crc &= 0x3f;
 2518         /* Set the corresponding bit in the hash table. */
 2519         mchash[crc >> 5] |= 1 << (crc & 0x1f);
 2520 
 2521         return (1);
 2522 }
 2523 
 2524 static void
 2525 stge_set_multi(struct stge_softc *sc)
 2526 {
 2527         struct ifnet *ifp;
 2528         uint32_t mchash[2];
 2529         uint16_t mode;
 2530         int count;
 2531 
 2532         STGE_LOCK_ASSERT(sc);
 2533 
 2534         ifp = sc->sc_ifp;
 2535 
 2536         mode = CSR_READ_2(sc, STGE_ReceiveMode);
 2537         if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
 2538                 if ((ifp->if_flags & IFF_PROMISC) != 0)
 2539                         mode |= RM_ReceiveAllFrames;
 2540                 else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
 2541                         mode |= RM_ReceiveMulticast;
 2542                 CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
 2543                 return;
 2544         }
 2545 
 2546         /* clear existing filters. */
 2547         CSR_WRITE_4(sc, STGE_HashTable0, 0);
 2548         CSR_WRITE_4(sc, STGE_HashTable1, 0);
 2549 
 2550         /*
 2551          * Set up the multicast address filter by passing all multicast
 2552          * addresses through a CRC generator, and then using the low-order
 2553          * 6 bits as an index into the 64 bit multicast hash table.  The
 2554          * high order bits select the register, while the rest of the bits
 2555          * select the bit within the register.
 2556          */
 2557         bzero(mchash, sizeof(mchash));
 2558         count = if_foreach_llmaddr(ifp, stge_hash_maddr, mchash);
 2559 
 2560         mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
 2561         if (count > 0)
 2562                 mode |= RM_ReceiveMulticastHash;
 2563         else
 2564                 mode &= ~RM_ReceiveMulticastHash;
 2565 
 2566         CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
 2567         CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
 2568         CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
 2569 }
 2570 
 2571 static int
 2572 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 2573 {
 2574         int error, value;
 2575 
 2576         if (!arg1)
 2577                 return (EINVAL);
 2578         value = *(int *)arg1;
 2579         error = sysctl_handle_int(oidp, &value, 0, req);
 2580         if (error || !req->newptr)
 2581                 return (error);
 2582         if (value < low || value > high)
 2583                 return (EINVAL);
 2584         *(int *)arg1 = value;
 2585 
 2586         return (0);
 2587 }
 2588 
 2589 static int
 2590 sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
 2591 {
 2592         return (sysctl_int_range(oidp, arg1, arg2, req,
 2593             STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
 2594 }
 2595 
 2596 static int
 2597 sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
 2598 {
 2599         return (sysctl_int_range(oidp, arg1, arg2, req,
 2600             STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
 2601 }

Cache object: 200f3970e68552c22ba9e97248fb8f4b


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.