FreeBSD/Linux Kernel Cross Reference
sys/dev/ste/if_ste.c

    1 /*-
    2  * Copyright (c) 1997, 1998, 1999
    3  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Bill Paul.
   16  * 4. Neither the name of the author nor the names of any co-contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   30  * THE POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD: releng/8.2/sys/dev/ste/if_ste.c 214909 2010-11-07 11:12:29Z marius $");
   35 
   36 #ifdef HAVE_KERNEL_OPTION_HEADERS
   37 #include "opt_device_polling.h"
   38 #endif
   39 
   40 #include <sys/param.h>
   41 #include <sys/systm.h>
   42 #include <sys/bus.h>
   43 #include <sys/endian.h>
   44 #include <sys/kernel.h>
   45 #include <sys/lock.h>
   46 #include <sys/malloc.h>
   47 #include <sys/mbuf.h>
   48 #include <sys/module.h>
   49 #include <sys/rman.h>
   50 #include <sys/socket.h>
   51 #include <sys/sockio.h>
   52 #include <sys/sysctl.h>
   53 
   54 #include <net/bpf.h>
   55 #include <net/if.h>
   56 #include <net/if_arp.h>
   57 #include <net/ethernet.h>
   58 #include <net/if_dl.h>
   59 #include <net/if_media.h>
   60 #include <net/if_types.h>
   61 #include <net/if_vlan_var.h>
   62 
   63 #include <machine/bus.h>
   64 #include <machine/resource.h>
   65 
   66 #include <dev/mii/mii.h>
   67 #include <dev/mii/miivar.h>
   68 
   69 #include <dev/pci/pcireg.h>
   70 #include <dev/pci/pcivar.h>
   71 
   72 #include <dev/ste/if_stereg.h>
   73 
   74 /* "device miibus" required.  See GENERIC if you get errors here. */
   75 #include "miibus_if.h"
   76 
   77 MODULE_DEPEND(ste, pci, 1, 1, 1);
   78 MODULE_DEPEND(ste, ether, 1, 1, 1);
   79 MODULE_DEPEND(ste, miibus, 1, 1, 1);
   80 
   81 /* Define to show Tx error status. */
   82 #define STE_SHOW_TXERRORS
   83 
   84 /*
   85  * Various supported device vendors/types and their names.
   86  */
   87 static struct ste_type ste_devs[] = {
   88         { ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" },
   89         { ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" },
   90         { DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" },
   91         { 0, 0, NULL }
   92 };
   93 
   94 static int      ste_attach(device_t);
   95 static int      ste_detach(device_t);
   96 static int      ste_probe(device_t);
   97 static int      ste_resume(device_t);
   98 static int      ste_shutdown(device_t);
   99 static int      ste_suspend(device_t);
  100 
  101 static int      ste_dma_alloc(struct ste_softc *);
  102 static void     ste_dma_free(struct ste_softc *);
  103 static void     ste_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  104 static int      ste_eeprom_wait(struct ste_softc *);
  105 static int      ste_encap(struct ste_softc *, struct mbuf **,
  106                     struct ste_chain *);
  107 static int      ste_ifmedia_upd(struct ifnet *);
  108 static void     ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  109 static void     ste_init(void *);
  110 static void     ste_init_locked(struct ste_softc *);
  111 static int      ste_init_rx_list(struct ste_softc *);
  112 static void     ste_init_tx_list(struct ste_softc *);
  113 static void     ste_intr(void *);
  114 static int      ste_ioctl(struct ifnet *, u_long, caddr_t);
  115 static int      ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *);
  116 static void     ste_mii_send(struct ste_softc *, uint32_t, int);
  117 static void     ste_mii_sync(struct ste_softc *);
  118 static int      ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *);
  119 static int      ste_miibus_readreg(device_t, int, int);
  120 static void     ste_miibus_statchg(device_t);
  121 static int      ste_miibus_writereg(device_t, int, int, int);
  122 static int      ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *);
  123 static int      ste_read_eeprom(struct ste_softc *, uint16_t *, int, int);
  124 static void     ste_reset(struct ste_softc *);
  125 static void     ste_restart_tx(struct ste_softc *);
  126 static int      ste_rxeof(struct ste_softc *, int);
  127 static void     ste_rxfilter(struct ste_softc *);
  128 static void     ste_setwol(struct ste_softc *);
  129 static void     ste_start(struct ifnet *);
  130 static void     ste_start_locked(struct ifnet *);
  131 static void     ste_stats_clear(struct ste_softc *);
  132 static void     ste_stats_update(struct ste_softc *);
  133 static void     ste_stop(struct ste_softc *);
  134 static void     ste_sysctl_node(struct ste_softc *);
  135 static void     ste_tick(void *);
  136 static void     ste_txeoc(struct ste_softc *);
  137 static void     ste_txeof(struct ste_softc *);
  138 static void     ste_wait(struct ste_softc *);
  139 static void     ste_watchdog(struct ste_softc *);
  140 
  141 static device_method_t ste_methods[] = {
  142         /* Device interface */
  143         DEVMETHOD(device_probe,         ste_probe),
  144         DEVMETHOD(device_attach,        ste_attach),
  145         DEVMETHOD(device_detach,        ste_detach),
  146         DEVMETHOD(device_shutdown,      ste_shutdown),
  147         DEVMETHOD(device_suspend,       ste_suspend),
  148         DEVMETHOD(device_resume,        ste_resume),
  149 
  150         /* bus interface */
  151         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  152         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  153 
  154         /* MII interface */
  155         DEVMETHOD(miibus_readreg,       ste_miibus_readreg),
  156         DEVMETHOD(miibus_writereg,      ste_miibus_writereg),
  157         DEVMETHOD(miibus_statchg,       ste_miibus_statchg),
  158 
  159         { 0, 0 }
  160 };
  161 
  162 static driver_t ste_driver = {
  163         "ste",
  164         ste_methods,
  165         sizeof(struct ste_softc)
  166 };
  167 
  168 static devclass_t ste_devclass;
  169 
  170 DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0);
  171 DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0);
  172 
  173 #define STE_SETBIT4(sc, reg, x)                         \
  174         CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
  175 
  176 #define STE_CLRBIT4(sc, reg, x)                         \
  177         CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
  178 
  179 #define STE_SETBIT2(sc, reg, x)                         \
  180         CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))
  181 
  182 #define STE_CLRBIT2(sc, reg, x)                         \
  183         CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))
  184 
  185 #define STE_SETBIT1(sc, reg, x)                         \
  186         CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))
  187 
  188 #define STE_CLRBIT1(sc, reg, x)                         \
  189         CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))
  190 
  191 
  192 #define MII_SET(x)              STE_SETBIT1(sc, STE_PHYCTL, x)
  193 #define MII_CLR(x)              STE_CLRBIT1(sc, STE_PHYCTL, x)
  194 
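/*
 * Illustrative note (not part of the original file): the STE_SETBIT and
 * STE_CLRBIT helpers above are plain read-modify-write register accessors.
 * Stalling the Tx DMA engine later in this file, for example, is simply:
 *
 *	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
 *	ste_wait(sc);
 */
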
  195 /*
   196  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
  197  */
  198 static void
  199 ste_mii_sync(struct ste_softc *sc)
  200 {
  201         int i;
  202 
  203         MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
  204 
  205         for (i = 0; i < 32; i++) {
  206                 MII_SET(STE_PHYCTL_MCLK);
  207                 DELAY(1);
  208                 MII_CLR(STE_PHYCTL_MCLK);
  209                 DELAY(1);
  210         }
  211 }
  212 
  213 /*
  214  * Clock a series of bits through the MII.
  215  */
  216 static void
  217 ste_mii_send(struct ste_softc *sc, uint32_t bits, int cnt)
  218 {
  219         int i;
  220 
  221         MII_CLR(STE_PHYCTL_MCLK);
  222 
  223         for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
  224                 if (bits & i) {
  225                         MII_SET(STE_PHYCTL_MDATA);
  226                 } else {
  227                         MII_CLR(STE_PHYCTL_MDATA);
  228                 }
  229                 DELAY(1);
  230                 MII_CLR(STE_PHYCTL_MCLK);
  231                 DELAY(1);
  232                 MII_SET(STE_PHYCTL_MCLK);
  233         }
  234 }
  235 
  236 /*
   237  * Read a PHY register through the MII.
  238  */
  239 static int
  240 ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
  241 {
  242         int i, ack;
  243 
  244         /*
  245          * Set up frame for RX.
  246          */
  247         frame->mii_stdelim = STE_MII_STARTDELIM;
  248         frame->mii_opcode = STE_MII_READOP;
  249         frame->mii_turnaround = 0;
  250         frame->mii_data = 0;
  251 
  252         CSR_WRITE_2(sc, STE_PHYCTL, 0);
  253         /*
  254          * Turn on data xmit.
  255          */
  256         MII_SET(STE_PHYCTL_MDIR);
  257 
  258         ste_mii_sync(sc);
  259 
  260         /*
  261          * Send command/address info.
  262          */
  263         ste_mii_send(sc, frame->mii_stdelim, 2);
  264         ste_mii_send(sc, frame->mii_opcode, 2);
  265         ste_mii_send(sc, frame->mii_phyaddr, 5);
  266         ste_mii_send(sc, frame->mii_regaddr, 5);
  267 
  268         /* Turn off xmit. */
  269         MII_CLR(STE_PHYCTL_MDIR);
  270 
  271         /* Idle bit */
  272         MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
  273         DELAY(1);
  274         MII_SET(STE_PHYCTL_MCLK);
  275         DELAY(1);
  276 
  277         /* Check for ack */
  278         MII_CLR(STE_PHYCTL_MCLK);
  279         DELAY(1);
  280         ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
  281         MII_SET(STE_PHYCTL_MCLK);
  282         DELAY(1);
  283 
  284         /*
  285          * Now try reading data bits. If the ack failed, we still
  286          * need to clock through 16 cycles to keep the PHY(s) in sync.
  287          */
  288         if (ack) {
  289                 for (i = 0; i < 16; i++) {
  290                         MII_CLR(STE_PHYCTL_MCLK);
  291                         DELAY(1);
  292                         MII_SET(STE_PHYCTL_MCLK);
  293                         DELAY(1);
  294                 }
  295                 goto fail;
  296         }
  297 
  298         for (i = 0x8000; i; i >>= 1) {
  299                 MII_CLR(STE_PHYCTL_MCLK);
  300                 DELAY(1);
  301                 if (!ack) {
  302                         if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
  303                                 frame->mii_data |= i;
  304                         DELAY(1);
  305                 }
  306                 MII_SET(STE_PHYCTL_MCLK);
  307                 DELAY(1);
  308         }
  309 
  310 fail:
  311 
  312         MII_CLR(STE_PHYCTL_MCLK);
  313         DELAY(1);
  314         MII_SET(STE_PHYCTL_MCLK);
  315         DELAY(1);
  316 
  317         if (ack)
  318                 return (1);
  319         return (0);
  320 }
  321 
  322 /*
  323  * Write to a PHY register through the MII.
  324  */
  325 static int
  326 ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
  327 {
  328 
  329         /*
  330          * Set up frame for TX.
  331          */
  332 
  333         frame->mii_stdelim = STE_MII_STARTDELIM;
  334         frame->mii_opcode = STE_MII_WRITEOP;
  335         frame->mii_turnaround = STE_MII_TURNAROUND;
  336 
  337         /*
  338          * Turn on data output.
  339          */
  340         MII_SET(STE_PHYCTL_MDIR);
  341 
  342         ste_mii_sync(sc);
  343 
  344         ste_mii_send(sc, frame->mii_stdelim, 2);
  345         ste_mii_send(sc, frame->mii_opcode, 2);
  346         ste_mii_send(sc, frame->mii_phyaddr, 5);
  347         ste_mii_send(sc, frame->mii_regaddr, 5);
  348         ste_mii_send(sc, frame->mii_turnaround, 2);
  349         ste_mii_send(sc, frame->mii_data, 16);
  350 
  351         /* Idle bit. */
  352         MII_SET(STE_PHYCTL_MCLK);
  353         DELAY(1);
  354         MII_CLR(STE_PHYCTL_MCLK);
  355         DELAY(1);
  356 
  357         /*
  358          * Turn off xmit.
  359          */
  360         MII_CLR(STE_PHYCTL_MDIR);
  361 
  362         return (0);
  363 }
  364 
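/*
 * Illustrative sketch (not part of the original file): the frame bit-banged
 * by ste_mii_readreg() and ste_mii_writereg() above is an IEEE 802.3
 * clause 22 management frame, <ST><OP><PHYAD><REGAD><TA><DATA>.  Assuming
 * the usual field widths (2/2/5/5/2/16 bits), a write frame packs into a
 * single 32-bit word, MSB first, which is the order ste_mii_send() shifts
 * the bits out.  A hypothetical helper showing the layout:
 */
static __inline uint32_t
ste_mii_pack_write(uint32_t st, uint32_t op, uint32_t phy, uint32_t reg,
    uint32_t ta, uint32_t data)
{

	return ((st & 0x3) << 30 | (op & 0x3) << 28 | (phy & 0x1f) << 23 |
	    (reg & 0x1f) << 18 | (ta & 0x3) << 16 | (data & 0xffff));
}
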
  365 static int
  366 ste_miibus_readreg(device_t dev, int phy, int reg)
  367 {
  368         struct ste_softc *sc;
  369         struct ste_mii_frame frame;
  370 
  371         sc = device_get_softc(dev);
  372         bzero((char *)&frame, sizeof(frame));
  373 
  374         frame.mii_phyaddr = phy;
  375         frame.mii_regaddr = reg;
  376         ste_mii_readreg(sc, &frame);
  377 
  378         return (frame.mii_data);
  379 }
  380 
  381 static int
  382 ste_miibus_writereg(device_t dev, int phy, int reg, int data)
  383 {
  384         struct ste_softc *sc;
  385         struct ste_mii_frame frame;
  386 
  387         sc = device_get_softc(dev);
  388         bzero((char *)&frame, sizeof(frame));
  389 
  390         frame.mii_phyaddr = phy;
  391         frame.mii_regaddr = reg;
  392         frame.mii_data = data;
  393 
  394         ste_mii_writereg(sc, &frame);
  395 
  396         return (0);
  397 }
  398 
  399 static void
  400 ste_miibus_statchg(device_t dev)
  401 {
  402         struct ste_softc *sc;
  403         struct mii_data *mii;
  404         struct ifnet *ifp;
  405         uint16_t cfg;
  406 
  407         sc = device_get_softc(dev);
  408 
  409         mii = device_get_softc(sc->ste_miibus);
  410         ifp = sc->ste_ifp;
  411         if (mii == NULL || ifp == NULL ||
  412             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
  413                 return;
  414 
  415         sc->ste_flags &= ~STE_FLAG_LINK;
  416         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
  417             (IFM_ACTIVE | IFM_AVALID)) {
  418                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
  419                 case IFM_10_T:
  420                 case IFM_100_TX:
  421                 case IFM_100_FX:
  422                 case IFM_100_T4:
  423                         sc->ste_flags |= STE_FLAG_LINK;
  424                 default:
  425                         break;
  426                 }
  427         }
  428 
   429         /* Program the MAC with the resolved speed, duplex and flow-control settings. */
  430         if ((sc->ste_flags & STE_FLAG_LINK) != 0) {
  431                 cfg = CSR_READ_2(sc, STE_MACCTL0);
  432                 cfg &= ~(STE_MACCTL0_FLOWCTL_ENABLE | STE_MACCTL0_FULLDUPLEX);
  433                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
  434                         /*
   435                          * The ST201 data sheet says the driver should set the
   436                          * "receive MAC control frames" bit in the receive mode
   437                          * register to accept flow-control frames, but the
   438                          * register has no such bit. The controller also cannot
   439                          * generate pause frames on its own, so pause handling
   440                          * would have to be done in the driver. Implementing a
   441                          * pause timer there is not trivial, so don't enable
   442                          * flow control here.
  443                          */
  444                         cfg |= STE_MACCTL0_FULLDUPLEX;
  445                 }
  446                 CSR_WRITE_2(sc, STE_MACCTL0, cfg);
  447         }
  448 }
  449 
  450 static int
  451 ste_ifmedia_upd(struct ifnet *ifp)
  452 {
  453         struct ste_softc *sc;
  454         struct mii_data *mii;
  455         struct mii_softc *miisc;
  456         int error;
  457 
  458         sc = ifp->if_softc;
  459         STE_LOCK(sc);
  460         mii = device_get_softc(sc->ste_miibus);
  461         if (mii->mii_instance) {
  462                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
  463                         mii_phy_reset(miisc);
  464         }
  465         error = mii_mediachg(mii);
  466         STE_UNLOCK(sc);
  467 
  468         return (error);
  469 }
  470 
  471 static void
  472 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
  473 {
  474         struct ste_softc *sc;
  475         struct mii_data *mii;
  476 
  477         sc = ifp->if_softc;
  478         mii = device_get_softc(sc->ste_miibus);
  479 
  480         STE_LOCK(sc);
  481         if ((ifp->if_flags & IFF_UP) == 0) {
  482                 STE_UNLOCK(sc);
  483                 return;
  484         }
  485         mii_pollstat(mii);
  486         ifmr->ifm_active = mii->mii_media_active;
  487         ifmr->ifm_status = mii->mii_media_status;
  488         STE_UNLOCK(sc);
  489 }
  490 
  491 static void
  492 ste_wait(struct ste_softc *sc)
  493 {
  494         int i;
  495 
  496         for (i = 0; i < STE_TIMEOUT; i++) {
  497                 if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
  498                         break;
  499                 DELAY(1);
  500         }
  501 
  502         if (i == STE_TIMEOUT)
  503                 device_printf(sc->ste_dev, "command never completed!\n");
  504 }
  505 
  506 /*
  507  * The EEPROM is slow: give it time to come ready after issuing
  508  * it a command.
  509  */
  510 static int
  511 ste_eeprom_wait(struct ste_softc *sc)
  512 {
  513         int i;
  514 
  515         DELAY(1000);
  516 
  517         for (i = 0; i < 100; i++) {
  518                 if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
  519                         DELAY(1000);
  520                 else
  521                         break;
  522         }
  523 
  524         if (i == 100) {
  525                 device_printf(sc->ste_dev, "eeprom failed to come ready\n");
  526                 return (1);
  527         }
  528 
  529         return (0);
  530 }
  531 
  532 /*
  533  * Read a sequence of words from the EEPROM. Note that ethernet address
  534  * data is stored in the EEPROM in network byte order.
  535  */
  536 static int
  537 ste_read_eeprom(struct ste_softc *sc, uint16_t *dest, int off, int cnt)
  538 {
  539         int err = 0, i;
  540 
  541         if (ste_eeprom_wait(sc))
  542                 return (1);
  543 
  544         for (i = 0; i < cnt; i++) {
  545                 CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
  546                 err = ste_eeprom_wait(sc);
  547                 if (err)
  548                         break;
  549                 *dest = le16toh(CSR_READ_2(sc, STE_EEPROM_DATA));
  550                 dest++;
  551         }
  552 
  553         return (err ? 1 : 0);
  554 }
  555 
  556 static void
  557 ste_rxfilter(struct ste_softc *sc)
  558 {
  559         struct ifnet *ifp;
  560         struct ifmultiaddr *ifma;
  561         uint32_t hashes[2] = { 0, 0 };
  562         uint8_t rxcfg;
  563         int h;
  564 
  565         STE_LOCK_ASSERT(sc);
  566 
  567         ifp = sc->ste_ifp;
  568         rxcfg = CSR_READ_1(sc, STE_RX_MODE);
  569         rxcfg |= STE_RXMODE_UNICAST;
  570         rxcfg &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_MULTIHASH |
  571             STE_RXMODE_BROADCAST | STE_RXMODE_PROMISC);
  572         if (ifp->if_flags & IFF_BROADCAST)
  573                 rxcfg |= STE_RXMODE_BROADCAST;
  574         if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
  575                 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
  576                         rxcfg |= STE_RXMODE_ALLMULTI;
  577                 if ((ifp->if_flags & IFF_PROMISC) != 0)
  578                         rxcfg |= STE_RXMODE_PROMISC;
  579                 goto chipit;
  580         }
  581 
  582         rxcfg |= STE_RXMODE_MULTIHASH;
   583         /* Now program the new multicast hash filter. */
  584         if_maddr_rlock(ifp);
  585         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  586                 if (ifma->ifma_addr->sa_family != AF_LINK)
  587                         continue;
  588                 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  589                     ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F;
  590                 if (h < 32)
  591                         hashes[0] |= (1 << h);
  592                 else
  593                         hashes[1] |= (1 << (h - 32));
  594         }
  595         if_maddr_runlock(ifp);
  596 
  597 chipit:
  598         CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
  599         CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
  600         CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
  601         CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
  602         CSR_WRITE_1(sc, STE_RX_MODE, rxcfg);
  603         CSR_READ_1(sc, STE_RX_MODE);
  604 }
  605 
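/*
 * Illustrative sketch (not part of the original file): ste_rxfilter() above
 * keys each multicast address to one of 64 bins, bit (crc & 0x3F) of a
 * 64-bit table that is then split across the four 16-bit STE_MAR registers.
 * A hypothetical helper showing whether a given group address would pass
 * the programmed filter:
 */
static int
ste_hash_would_accept(const uint8_t *maddr, const uint32_t hashes[2])
{
	int h;

	/* Same computation as in ste_rxfilter(). */
	h = ether_crc32_be(maddr, ETHER_ADDR_LEN) & 0x3F;
	return ((hashes[h / 32] & (1U << (h % 32))) != 0);
}
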
  606 #ifdef DEVICE_POLLING
  607 static poll_handler_t ste_poll, ste_poll_locked;
  608 
  609 static int
  610 ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
  611 {
  612         struct ste_softc *sc = ifp->if_softc;
  613         int rx_npkts = 0;
  614 
  615         STE_LOCK(sc);
  616         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
  617                 rx_npkts = ste_poll_locked(ifp, cmd, count);
  618         STE_UNLOCK(sc);
  619         return (rx_npkts);
  620 }
  621 
  622 static int
  623 ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
  624 {
  625         struct ste_softc *sc = ifp->if_softc;
  626         int rx_npkts;
  627 
  628         STE_LOCK_ASSERT(sc);
  629 
  630         rx_npkts = ste_rxeof(sc, count);
  631         ste_txeof(sc);
  632         ste_txeoc(sc);
  633         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
  634                 ste_start_locked(ifp);
  635 
  636         if (cmd == POLL_AND_CHECK_STATUS) {
  637                 uint16_t status;
  638 
  639                 status = CSR_READ_2(sc, STE_ISR_ACK);
  640 
  641                 if (status & STE_ISR_STATS_OFLOW)
  642                         ste_stats_update(sc);
  643 
  644                 if (status & STE_ISR_HOSTERR) {
  645                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  646                         ste_init_locked(sc);
  647                 }
  648         }
  649         return (rx_npkts);
  650 }
  651 #endif /* DEVICE_POLLING */
  652 
  653 static void
  654 ste_intr(void *xsc)
  655 {
  656         struct ste_softc *sc;
  657         struct ifnet *ifp;
  658         uint16_t intrs, status;
  659 
  660         sc = xsc;
  661         STE_LOCK(sc);
  662         ifp = sc->ste_ifp;
  663 
  664 #ifdef DEVICE_POLLING
  665         if (ifp->if_capenable & IFCAP_POLLING) {
  666                 STE_UNLOCK(sc);
  667                 return;
  668         }
  669 #endif
  670         /* Reading STE_ISR_ACK clears STE_IMR register. */
  671         status = CSR_READ_2(sc, STE_ISR_ACK);
  672         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
  673                 STE_UNLOCK(sc);
  674                 return;
  675         }
  676 
  677         intrs = STE_INTRS;
  678         if (status == 0xFFFF || (status & intrs) == 0)
  679                 goto done;
  680 
  681         if (sc->ste_int_rx_act > 0) {
  682                 status &= ~STE_ISR_RX_DMADONE;
  683                 intrs &= ~STE_IMR_RX_DMADONE;
  684         }
  685 
  686         if ((status & (STE_ISR_SOFTINTR | STE_ISR_RX_DMADONE)) != 0) {
  687                 ste_rxeof(sc, -1);
  688                 /*
   689                  * The controller has no hardware Rx interrupt
   690                  * moderation. Receiving streams of 64-byte frames
   691                  * from the wire generates so many interrupts that
   692                  * the system has no time left to do useful work.
   693                  * Fortunately the ST201 supports a single-shot
   694                  * timer, so use it to implement Rx interrupt
   695                  * moderation in the driver. This adds a few more
   696                  * register accesses but greatly reduces the number
   697                  * of Rx interrupts under high network load.
  698                  */
  699                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
  700                     (sc->ste_int_rx_mod != 0)) {
  701                         if ((status & STE_ISR_RX_DMADONE) != 0) {
  702                                 CSR_WRITE_2(sc, STE_COUNTDOWN,
  703                                     STE_TIMER_USECS(sc->ste_int_rx_mod));
  704                                 intrs &= ~STE_IMR_RX_DMADONE;
  705                                 sc->ste_int_rx_act = 1;
  706                         } else {
  707                                 intrs |= STE_IMR_RX_DMADONE;
  708                                 sc->ste_int_rx_act = 0;
  709                         }
  710                 }
  711         }
  712         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
  713                 if ((status & STE_ISR_TX_DMADONE) != 0)
  714                         ste_txeof(sc);
  715                 if ((status & STE_ISR_TX_DONE) != 0)
  716                         ste_txeoc(sc);
  717                 if ((status & STE_ISR_STATS_OFLOW) != 0)
  718                         ste_stats_update(sc);
  719                 if ((status & STE_ISR_HOSTERR) != 0) {
  720                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  721                         ste_init_locked(sc);
  722                         STE_UNLOCK(sc);
  723                         return;
  724                 }
  725                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
  726                         ste_start_locked(ifp);
  727 done:
  728                 /* Re-enable interrupts */
  729                 CSR_WRITE_2(sc, STE_IMR, intrs);
  730         }
  731         STE_UNLOCK(sc);
  732 }
  733 
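/*
 * Illustrative note (not part of the original file): the moderation logic in
 * ste_intr() above is a small two-state machine.  On an Rx DMA-done
 * interrupt with ste_int_rx_mod set, the handler masks STE_IMR_RX_DMADONE
 * and arms the single-shot STE_COUNTDOWN timer; on the next interrupt taken
 * while moderation is active (typically the countdown expiry), the ring is
 * drained and STE_IMR_RX_DMADONE is unmasked again.
 */
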
  734 /*
  735  * A frame has been uploaded: pass the resulting mbuf chain up to
  736  * the higher level protocols.
  737  */
  738 static int
  739 ste_rxeof(struct ste_softc *sc, int count)
  740 {
  741         struct mbuf *m;
  742         struct ifnet *ifp;
  743         struct ste_chain_onefrag *cur_rx;
  744         uint32_t rxstat;
  745         int total_len, rx_npkts;
  746 
  747         ifp = sc->ste_ifp;
  748 
  749         bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
  750             sc->ste_cdata.ste_rx_list_map,
  751             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  752 
  753         cur_rx = sc->ste_cdata.ste_rx_head;
  754         for (rx_npkts = 0; rx_npkts < STE_RX_LIST_CNT; rx_npkts++,
  755             cur_rx = cur_rx->ste_next) {
  756                 rxstat = le32toh(cur_rx->ste_ptr->ste_status);
  757                 if ((rxstat & STE_RXSTAT_DMADONE) == 0)
  758                         break;
  759 #ifdef DEVICE_POLLING
  760                 if (ifp->if_capenable & IFCAP_POLLING) {
  761                         if (count == 0)
  762                                 break;
  763                         count--;
  764                 }
  765 #endif
  766                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
  767                         break;
  768                 /*
  769                  * If an error occurs, update stats, clear the
  770                  * status word and leave the mbuf cluster in place:
  771                  * it should simply get re-used next time this descriptor
  772                  * comes up in the ring.
  773                  */
  774                 if (rxstat & STE_RXSTAT_FRAME_ERR) {
  775                         ifp->if_ierrors++;
  776                         cur_rx->ste_ptr->ste_status = 0;
  777                         continue;
  778                 }
  779 
  780                 /* No errors; receive the packet. */
  781                 m = cur_rx->ste_mbuf;
  782                 total_len = STE_RX_BYTES(rxstat);
  783 
  784                 /*
  785                  * Try to conjure up a new mbuf cluster. If that
  786                  * fails, it means we have an out of memory condition and
  787                  * should leave the buffer in place and continue. This will
  788                  * result in a lost packet, but there's little else we
  789                  * can do in this situation.
  790                  */
  791                 if (ste_newbuf(sc, cur_rx) != 0) {
  792                         ifp->if_iqdrops++;
  793                         cur_rx->ste_ptr->ste_status = 0;
  794                         continue;
  795                 }
  796 
  797                 m->m_pkthdr.rcvif = ifp;
  798                 m->m_pkthdr.len = m->m_len = total_len;
  799 
  800                 ifp->if_ipackets++;
  801                 STE_UNLOCK(sc);
  802                 (*ifp->if_input)(ifp, m);
  803                 STE_LOCK(sc);
  804         }
  805 
  806         if (rx_npkts > 0) {
  807                 sc->ste_cdata.ste_rx_head = cur_rx;
  808                 bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
  809                     sc->ste_cdata.ste_rx_list_map,
  810                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  811         }
  812 
  813         return (rx_npkts);
  814 }
  815 
  816 static void
  817 ste_txeoc(struct ste_softc *sc)
  818 {
  819         uint16_t txstat;
  820         struct ifnet *ifp;
  821 
  822         STE_LOCK_ASSERT(sc);
  823 
  824         ifp = sc->ste_ifp;
  825 
  826         /*
   827          * The STE_TX_STATUS register implements a queue of up to
   828          * 31 transmit status bytes. Writing any value to the
   829          * register advances the queue to the next transmit
   830          * status byte. This means that if the driver does not
   831          * read STE_TX_STATUS after more than 31 frames have been
   832          * sent, the controller stalls and the driver has to
   833          * re-wake the Tx MAC. This is the most severe limitation
   834          * of ST201 based controllers.
  835          */
  836         for (;;) {
  837                 txstat = CSR_READ_2(sc, STE_TX_STATUS);
  838                 if ((txstat & STE_TXSTATUS_TXDONE) == 0)
  839                         break;
  840                 if ((txstat & (STE_TXSTATUS_UNDERRUN |
  841                     STE_TXSTATUS_EXCESSCOLLS | STE_TXSTATUS_RECLAIMERR |
  842                     STE_TXSTATUS_STATSOFLOW)) != 0) {
  843                         ifp->if_oerrors++;
  844 #ifdef  STE_SHOW_TXERRORS
  845                         device_printf(sc->ste_dev, "TX error : 0x%b\n",
  846                             txstat & 0xFF, STE_ERR_BITS);
  847 #endif
  848                         if ((txstat & STE_TXSTATUS_UNDERRUN) != 0 &&
  849                             sc->ste_tx_thresh < STE_PACKET_SIZE) {
  850                                 sc->ste_tx_thresh += STE_MIN_FRAMELEN;
  851                                 if (sc->ste_tx_thresh > STE_PACKET_SIZE)
  852                                         sc->ste_tx_thresh = STE_PACKET_SIZE;
  853                                 device_printf(sc->ste_dev,
  854                                     "TX underrun, increasing TX"
  855                                     " start threshold to %d bytes\n",
  856                                     sc->ste_tx_thresh);
  857                                 /* Make sure to disable active DMA cycles. */
  858                                 STE_SETBIT4(sc, STE_DMACTL,
  859                                     STE_DMACTL_TXDMA_STALL);
  860                                 ste_wait(sc);
  861                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  862                                 ste_init_locked(sc);
  863                                 break;
  864                         }
  865                         /* Restart Tx. */
  866                         ste_restart_tx(sc);
  867                 }
  868                 /*
   869                  * Advance to the next status and ACK the TxComplete
   870                  * interrupt. The ST201 data sheet is wrong here: to
   871                  * get the next Tx status, both the STE_TX_STATUS and
   872                  * STE_TX_FRAMEID registers have to be written.
   873                  * Otherwise the controller keeps returning the same
   874                  * status and never acknowledges the Tx completion
   875                  * interrupt.
  876                  */
  877                 CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
  878         }
  879 }
  880 
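/*
 * Illustrative note (not part of the original file): because the status
 * queue described above only holds 31 entries, ste_txeoc() has to run often
 * enough to keep it from filling up; the driver drains it both from the
 * TxDone interrupt path in ste_intr() and from the one-second ste_tick()
 * callout below.
 */
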
  881 static void
  882 ste_tick(void *arg)
  883 {
  884         struct ste_softc *sc;
  885         struct mii_data *mii;
  886 
  887         sc = (struct ste_softc *)arg;
  888 
  889         STE_LOCK_ASSERT(sc);
  890 
  891         mii = device_get_softc(sc->ste_miibus);
  892         mii_tick(mii);
  893         /*
   894          * ukphy(4) does not seem to generate a callback that
   895          * reports the resolved link state, so if we know the
   896          * link was lost, explicitly re-check the link state here.
  897          */
  898         if ((sc->ste_flags & STE_FLAG_LINK) == 0)
  899                 ste_miibus_statchg(sc->ste_dev);
  900         /*
   901          * Because we do not generate a Tx completion
   902          * interrupt for every frame, reclaim transmitted
   903          * buffers here.
  904          */
  905         ste_txeof(sc);
  906         ste_txeoc(sc);
  907         ste_stats_update(sc);
  908         ste_watchdog(sc);
  909         callout_reset(&sc->ste_callout, hz, ste_tick, sc);
  910 }
  911 
  912 static void
  913 ste_txeof(struct ste_softc *sc)
  914 {
  915         struct ifnet *ifp;
  916         struct ste_chain *cur_tx;
  917         uint32_t txstat;
  918         int idx;
  919 
  920         STE_LOCK_ASSERT(sc);
  921 
  922         ifp = sc->ste_ifp;
  923         idx = sc->ste_cdata.ste_tx_cons;
  924         if (idx == sc->ste_cdata.ste_tx_prod)
  925                 return;
  926 
  927         bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
  928             sc->ste_cdata.ste_tx_list_map,
  929             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  930 
  931         while (idx != sc->ste_cdata.ste_tx_prod) {
  932                 cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
  933                 txstat = le32toh(cur_tx->ste_ptr->ste_ctl);
  934                 if ((txstat & STE_TXCTL_DMADONE) == 0)
  935                         break;
  936                 bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map,
  937                     BUS_DMASYNC_POSTWRITE);
  938                 bus_dmamap_unload(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map);
  939                 KASSERT(cur_tx->ste_mbuf != NULL,
  940                     ("%s: freeing NULL mbuf!\n", __func__));
  941                 m_freem(cur_tx->ste_mbuf);
  942                 cur_tx->ste_mbuf = NULL;
  943                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  944                 ifp->if_opackets++;
  945                 sc->ste_cdata.ste_tx_cnt--;
  946                 STE_INC(idx, STE_TX_LIST_CNT);
  947         }
  948 
  949         sc->ste_cdata.ste_tx_cons = idx;
  950         if (sc->ste_cdata.ste_tx_cnt == 0)
  951                 sc->ste_timer = 0;
  952 }
  953 
  954 static void
  955 ste_stats_clear(struct ste_softc *sc)
  956 {
  957 
  958         STE_LOCK_ASSERT(sc);
  959 
  960         /* Rx stats. */
  961         CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO);
  962         CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI);
  963         CSR_READ_2(sc, STE_STAT_RX_FRAMES);
  964         CSR_READ_1(sc, STE_STAT_RX_BCAST);
  965         CSR_READ_1(sc, STE_STAT_RX_MCAST);
  966         CSR_READ_1(sc, STE_STAT_RX_LOST);
  967         /* Tx stats. */
  968         CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO);
  969         CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI);
  970         CSR_READ_2(sc, STE_STAT_TX_FRAMES);
  971         CSR_READ_1(sc, STE_STAT_TX_BCAST);
  972         CSR_READ_1(sc, STE_STAT_TX_MCAST);
  973         CSR_READ_1(sc, STE_STAT_CARRIER_ERR);
  974         CSR_READ_1(sc, STE_STAT_SINGLE_COLLS);
  975         CSR_READ_1(sc, STE_STAT_MULTI_COLLS);
  976         CSR_READ_1(sc, STE_STAT_LATE_COLLS);
  977         CSR_READ_1(sc, STE_STAT_TX_DEFER);
  978         CSR_READ_1(sc, STE_STAT_TX_EXDEFER);
  979         CSR_READ_1(sc, STE_STAT_TX_ABORT);
  980 }
  981 
  982 static void
  983 ste_stats_update(struct ste_softc *sc)
  984 {
  985         struct ifnet *ifp;
  986         struct ste_hw_stats *stats;
  987         uint32_t val;
  988 
  989         STE_LOCK_ASSERT(sc);
  990 
  991         ifp = sc->ste_ifp;
  992         stats = &sc->ste_stats;
  993         /* Rx stats. */
  994         val = (uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO) |
  995             ((uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI)) << 16;
  996         val &= 0x000FFFFF;
  997         stats->rx_bytes += val;
  998         stats->rx_frames += CSR_READ_2(sc, STE_STAT_RX_FRAMES);
  999         stats->rx_bcast_frames += CSR_READ_1(sc, STE_STAT_RX_BCAST);
 1000         stats->rx_mcast_frames += CSR_READ_1(sc, STE_STAT_RX_MCAST);
 1001         stats->rx_lost_frames += CSR_READ_1(sc, STE_STAT_RX_LOST);
 1002         /* Tx stats. */
 1003         val = (uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO) |
 1004             ((uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI)) << 16;
 1005         val &= 0x000FFFFF;
 1006         stats->tx_bytes += val;
 1007         stats->tx_frames += CSR_READ_2(sc, STE_STAT_TX_FRAMES);
 1008         stats->tx_bcast_frames += CSR_READ_1(sc, STE_STAT_TX_BCAST);
 1009         stats->tx_mcast_frames += CSR_READ_1(sc, STE_STAT_TX_MCAST);
 1010         stats->tx_carrsense_errs += CSR_READ_1(sc, STE_STAT_CARRIER_ERR);
 1011         val = CSR_READ_1(sc, STE_STAT_SINGLE_COLLS);
 1012         stats->tx_single_colls += val;
 1013         ifp->if_collisions += val;
 1014         val = CSR_READ_1(sc, STE_STAT_MULTI_COLLS);
 1015         stats->tx_multi_colls += val;
 1016         ifp->if_collisions += val;
 1017         val += CSR_READ_1(sc, STE_STAT_LATE_COLLS);
 1018         stats->tx_late_colls += val;
 1019         ifp->if_collisions += val;
 1020         stats->tx_frames_defered += CSR_READ_1(sc, STE_STAT_TX_DEFER);
 1021         stats->tx_excess_defers += CSR_READ_1(sc, STE_STAT_TX_EXDEFER);
 1022         stats->tx_abort += CSR_READ_1(sc, STE_STAT_TX_ABORT);
 1023 }
 1024 
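/*
 * Illustrative sketch (not part of the original file): the octet counters
 * read above are recombined from a 16-bit LO register and a HI register and
 * then masked to 20 bits (0x000FFFFF).  A hypothetical helper for the same
 * arithmetic:
 */
static __inline uint32_t
ste_stat_octets(uint16_t lo, uint16_t hi)
{

	return (((uint32_t)lo | ((uint32_t)hi << 16)) & 0x000FFFFF);
}
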
 1025 /*
 1026  * Probe for a Sundance ST201 chip. Check the PCI vendor and device
 1027  * IDs against our list and return a device name if we find a match.
 1028  */
 1029 static int
 1030 ste_probe(device_t dev)
 1031 {
 1032         struct ste_type *t;
 1033 
 1034         t = ste_devs;
 1035 
 1036         while (t->ste_name != NULL) {
 1037                 if ((pci_get_vendor(dev) == t->ste_vid) &&
 1038                     (pci_get_device(dev) == t->ste_did)) {
 1039                         device_set_desc(dev, t->ste_name);
 1040                         return (BUS_PROBE_DEFAULT);
 1041                 }
 1042                 t++;
 1043         }
 1044 
 1045         return (ENXIO);
 1046 }
 1047 
 1048 /*
 1049  * Attach the interface. Allocate softc structures, do ifmedia
 1050  * setup and ethernet/BPF attach.
 1051  */
 1052 static int
 1053 ste_attach(device_t dev)
 1054 {
 1055         struct ste_softc *sc;
 1056         struct ifnet *ifp;
 1057         uint16_t eaddr[ETHER_ADDR_LEN / 2];
 1058         int error = 0, phy, pmc, prefer_iomap, rid;
 1059 
 1060         sc = device_get_softc(dev);
 1061         sc->ste_dev = dev;
 1062 
 1063         /*
  1064          * Only use one PHY, since this chip reports multiple.
  1065          * Note: on the DFE-550 the PHY is at address 1; on the
  1066          * DFE-580 it is at 0 and 1.  It is rev 0x12.
 1067          */
 1068         if (pci_get_vendor(dev) == DL_VENDORID &&
 1069             pci_get_device(dev) == DL_DEVICEID_DL10050 &&
 1070             pci_get_revid(dev) == 0x12 )
 1071                 sc->ste_flags |= STE_FLAG_ONE_PHY;
 1072 
 1073         mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1074             MTX_DEF);
 1075         /*
 1076          * Map control/status registers.
 1077          */
 1078         pci_enable_busmaster(dev);
 1079 
 1080         /*
  1081          * Prefer memory space register mapping over IO space, but
  1082          * use IO space for a device that is known to have issues
  1083          * with memory mapping.
 1084          */
 1085         prefer_iomap = 0;
 1086         if (pci_get_device(dev) == ST_DEVICEID_ST201_1)
 1087                 prefer_iomap = 1;
 1088         else
 1089                 resource_int_value(device_get_name(sc->ste_dev),
 1090                     device_get_unit(sc->ste_dev), "prefer_iomap",
 1091                     &prefer_iomap);
 1092         if (prefer_iomap == 0) {
 1093                 sc->ste_res_id = PCIR_BAR(1);
 1094                 sc->ste_res_type = SYS_RES_MEMORY;
 1095                 sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type,
 1096                     &sc->ste_res_id, RF_ACTIVE);
 1097         }
 1098         if (prefer_iomap || sc->ste_res == NULL) {
 1099                 sc->ste_res_id = PCIR_BAR(0);
 1100                 sc->ste_res_type = SYS_RES_IOPORT;
 1101                 sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type,
 1102                     &sc->ste_res_id, RF_ACTIVE);
 1103         }
 1104         if (sc->ste_res == NULL) {
 1105                 device_printf(dev, "couldn't map ports/memory\n");
 1106                 error = ENXIO;
 1107                 goto fail;
 1108         }
 1109 
 1110         /* Allocate interrupt */
 1111         rid = 0;
 1112         sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 1113             RF_SHAREABLE | RF_ACTIVE);
 1114 
 1115         if (sc->ste_irq == NULL) {
 1116                 device_printf(dev, "couldn't map interrupt\n");
 1117                 error = ENXIO;
 1118                 goto fail;
 1119         }
 1120 
 1121         callout_init_mtx(&sc->ste_callout, &sc->ste_mtx, 0);
 1122 
 1123         /* Reset the adapter. */
 1124         ste_reset(sc);
 1125 
 1126         /*
 1127          * Get station address from the EEPROM.
 1128          */
 1129         if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, ETHER_ADDR_LEN / 2)) {
 1130                 device_printf(dev, "failed to read station address\n");
 1131                 error = ENXIO;
 1132                 goto fail;
 1133         }
 1134         ste_sysctl_node(sc);
 1135 
 1136         if ((error = ste_dma_alloc(sc)) != 0)
 1137                 goto fail;
 1138 
 1139         ifp = sc->ste_ifp = if_alloc(IFT_ETHER);
 1140         if (ifp == NULL) {
 1141                 device_printf(dev, "can not if_alloc()\n");
 1142                 error = ENOSPC;
 1143                 goto fail;
 1144         }
 1145 
 1146         /* Do MII setup. */
 1147         phy = MII_PHY_ANY;
 1148         if ((sc->ste_flags & STE_FLAG_ONE_PHY) != 0)
 1149                 phy = 0;
 1150         error = mii_attach(dev, &sc->ste_miibus, ifp, ste_ifmedia_upd,
 1151                 ste_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
 1152         if (error != 0) {
 1153                 device_printf(dev, "attaching PHYs failed\n");
 1154                 goto fail;
 1155         }
 1156 
 1157         ifp->if_softc = sc;
 1158         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1159         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1160         ifp->if_ioctl = ste_ioctl;
 1161         ifp->if_start = ste_start;
 1162         ifp->if_init = ste_init;
 1163         IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
 1164         ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1;
 1165         IFQ_SET_READY(&ifp->if_snd);
 1166 
 1167         sc->ste_tx_thresh = STE_TXSTART_THRESH;
 1168 
 1169         /*
 1170          * Call MI attach routine.
 1171          */
 1172         ether_ifattach(ifp, (uint8_t *)eaddr);
 1173 
 1174         /*
 1175          * Tell the upper layer(s) we support long frames.
 1176          */
 1177         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 1178         ifp->if_capabilities |= IFCAP_VLAN_MTU;
 1179         if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
 1180                 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
 1181         ifp->if_capenable = ifp->if_capabilities;
 1182 #ifdef DEVICE_POLLING
 1183         ifp->if_capabilities |= IFCAP_POLLING;
 1184 #endif
 1185 
 1186         /* Hook interrupt last to avoid having to lock softc */
 1187         error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE,
 1188             NULL, ste_intr, sc, &sc->ste_intrhand);
 1189 
 1190         if (error) {
 1191                 device_printf(dev, "couldn't set up irq\n");
 1192                 ether_ifdetach(ifp);
 1193                 goto fail;
 1194         }
 1195 
 1196 fail:
 1197         if (error)
 1198                 ste_detach(dev);
 1199 
 1200         return (error);
 1201 }
 1202 
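/*
 * Illustrative note (not part of the original file): the prefer_iomap knob
 * read via resource_int_value() in ste_attach() above is a standard device
 * hint, so I/O-port mapping can be forced on a system whose controller
 * misbehaves with memory-mapped registers by setting, e.g. in
 * /boot/device.hints or loader.conf:
 *
 *	hint.ste.0.prefer_iomap="1"
 */
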
 1203 /*
 1204  * Shutdown hardware and free up resources. This can be called any
 1205  * time after the mutex has been initialized. It is called in both
 1206  * the error case in attach and the normal detach case so it needs
 1207  * to be careful about only freeing resources that have actually been
 1208  * allocated.
 1209  */
 1210 static int
 1211 ste_detach(device_t dev)
 1212 {
 1213         struct ste_softc *sc;
 1214         struct ifnet *ifp;
 1215 
 1216         sc = device_get_softc(dev);
 1217         KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized"));
 1218         ifp = sc->ste_ifp;
 1219 
 1220 #ifdef DEVICE_POLLING
 1221         if (ifp->if_capenable & IFCAP_POLLING)
 1222                 ether_poll_deregister(ifp);
 1223 #endif
 1224 
 1225         /* These should only be active if attach succeeded */
 1226         if (device_is_attached(dev)) {
 1227                 ether_ifdetach(ifp);
 1228                 STE_LOCK(sc);
 1229                 ste_stop(sc);
 1230                 STE_UNLOCK(sc);
 1231                 callout_drain(&sc->ste_callout);
 1232         }
 1233         if (sc->ste_miibus)
 1234                 device_delete_child(dev, sc->ste_miibus);
 1235         bus_generic_detach(dev);
 1236 
 1237         if (sc->ste_intrhand)
 1238                 bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
 1239         if (sc->ste_irq)
 1240                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
 1241         if (sc->ste_res)
 1242                 bus_release_resource(dev, sc->ste_res_type, sc->ste_res_id,
 1243                     sc->ste_res);
 1244 
 1245         if (ifp)
 1246                 if_free(ifp);
 1247 
 1248         ste_dma_free(sc);
 1249         mtx_destroy(&sc->ste_mtx);
 1250 
 1251         return (0);
 1252 }
 1253 
 1254 struct ste_dmamap_arg {
 1255         bus_addr_t      ste_busaddr;
 1256 };
 1257 
 1258 static void
 1259 ste_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
 1260 {
 1261         struct ste_dmamap_arg *ctx;
 1262 
 1263         if (error != 0)
 1264                 return;
 1265 
 1266         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1267 
 1268         ctx = (struct ste_dmamap_arg *)arg;
 1269         ctx->ste_busaddr = segs[0].ds_addr;
 1270 }
 1271 
 1272 static int
 1273 ste_dma_alloc(struct ste_softc *sc)
 1274 {
 1275         struct ste_chain *txc;
 1276         struct ste_chain_onefrag *rxc;
 1277         struct ste_dmamap_arg ctx;
 1278         int error, i;
 1279 
 1280         /* Create parent DMA tag. */
 1281         error = bus_dma_tag_create(
 1282             bus_get_dma_tag(sc->ste_dev), /* parent */
 1283             1, 0,                       /* alignment, boundary */
 1284             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 1285             BUS_SPACE_MAXADDR,          /* highaddr */
 1286             NULL, NULL,                 /* filter, filterarg */
 1287             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 1288             0,                          /* nsegments */
 1289             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1290             0,                          /* flags */
 1291             NULL, NULL,                 /* lockfunc, lockarg */
 1292             &sc->ste_cdata.ste_parent_tag);
 1293         if (error != 0) {
 1294                 device_printf(sc->ste_dev,
 1295                     "could not create parent DMA tag.\n");
 1296                 goto fail;
 1297         }
 1298 
 1299         /* Create DMA tag for Tx descriptor list. */
 1300         error = bus_dma_tag_create(
 1301             sc->ste_cdata.ste_parent_tag, /* parent */
 1302             STE_DESC_ALIGN, 0,          /* alignment, boundary */
 1303             BUS_SPACE_MAXADDR,          /* lowaddr */
 1304             BUS_SPACE_MAXADDR,          /* highaddr */
 1305             NULL, NULL,                 /* filter, filterarg */
 1306             STE_TX_LIST_SZ,             /* maxsize */
 1307             1,                          /* nsegments */
 1308             STE_TX_LIST_SZ,             /* maxsegsize */
 1309             0,                          /* flags */
 1310             NULL, NULL,                 /* lockfunc, lockarg */
 1311             &sc->ste_cdata.ste_tx_list_tag);
 1312         if (error != 0) {
 1313                 device_printf(sc->ste_dev,
 1314                     "could not create Tx list DMA tag.\n");
 1315                 goto fail;
 1316         }
 1317 
 1318         /* Create DMA tag for Rx descriptor list. */
 1319         error = bus_dma_tag_create(
 1320             sc->ste_cdata.ste_parent_tag, /* parent */
 1321             STE_DESC_ALIGN, 0,          /* alignment, boundary */
 1322             BUS_SPACE_MAXADDR,          /* lowaddr */
 1323             BUS_SPACE_MAXADDR,          /* highaddr */
 1324             NULL, NULL,                 /* filter, filterarg */
 1325             STE_RX_LIST_SZ,             /* maxsize */
 1326             1,                          /* nsegments */
 1327             STE_RX_LIST_SZ,             /* maxsegsize */
 1328             0,                          /* flags */
 1329             NULL, NULL,                 /* lockfunc, lockarg */
 1330             &sc->ste_cdata.ste_rx_list_tag);
 1331         if (error != 0) {
 1332                 device_printf(sc->ste_dev,
 1333                     "could not create Rx list DMA tag.\n");
 1334                 goto fail;
 1335         }
 1336 
 1337         /* Create DMA tag for Tx buffers. */
 1338         error = bus_dma_tag_create(
 1339             sc->ste_cdata.ste_parent_tag, /* parent */
 1340             1, 0,                       /* alignment, boundary */
 1341             BUS_SPACE_MAXADDR,          /* lowaddr */
 1342             BUS_SPACE_MAXADDR,          /* highaddr */
 1343             NULL, NULL,                 /* filter, filterarg */
 1344             MCLBYTES * STE_MAXFRAGS,    /* maxsize */
 1345             STE_MAXFRAGS,               /* nsegments */
 1346             MCLBYTES,                   /* maxsegsize */
 1347             0,                          /* flags */
 1348             NULL, NULL,                 /* lockfunc, lockarg */
 1349             &sc->ste_cdata.ste_tx_tag);
 1350         if (error != 0) {
 1351                 device_printf(sc->ste_dev, "could not create Tx DMA tag.\n");
 1352                 goto fail;
 1353         }
 1354 
 1355         /* Create DMA tag for Rx buffers. */
 1356         error = bus_dma_tag_create(
 1357             sc->ste_cdata.ste_parent_tag, /* parent */
 1358             1, 0,                       /* alignment, boundary */
 1359             BUS_SPACE_MAXADDR,          /* lowaddr */
 1360             BUS_SPACE_MAXADDR,          /* highaddr */
 1361             NULL, NULL,                 /* filter, filterarg */
 1362             MCLBYTES,                   /* maxsize */
 1363             1,                          /* nsegments */
 1364             MCLBYTES,                   /* maxsegsize */
 1365             0,                          /* flags */
 1366             NULL, NULL,                 /* lockfunc, lockarg */
 1367             &sc->ste_cdata.ste_rx_tag);
 1368         if (error != 0) {
 1369                 device_printf(sc->ste_dev, "could not create Rx DMA tag.\n");
 1370                 goto fail;
 1371         }
 1372 
 1373         /* Allocate DMA'able memory and load the DMA map for Tx list. */
 1374         error = bus_dmamem_alloc(sc->ste_cdata.ste_tx_list_tag,
 1375             (void **)&sc->ste_ldata.ste_tx_list,
 1376             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1377             &sc->ste_cdata.ste_tx_list_map);
 1378         if (error != 0) {
 1379                 device_printf(sc->ste_dev,
 1380                     "could not allocate DMA'able memory for Tx list.\n");
 1381                 goto fail;
 1382         }
 1383         ctx.ste_busaddr = 0;
 1384         error = bus_dmamap_load(sc->ste_cdata.ste_tx_list_tag,
 1385             sc->ste_cdata.ste_tx_list_map, sc->ste_ldata.ste_tx_list,
 1386             STE_TX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
 1387         if (error != 0 || ctx.ste_busaddr == 0) {
 1388                 device_printf(sc->ste_dev,
 1389                     "could not load DMA'able memory for Tx list.\n");
 1390                 goto fail;
 1391         }
 1392         sc->ste_ldata.ste_tx_list_paddr = ctx.ste_busaddr;
 1393 
 1394         /* Allocate DMA'able memory and load the DMA map for Rx list. */
 1395         error = bus_dmamem_alloc(sc->ste_cdata.ste_rx_list_tag,
 1396             (void **)&sc->ste_ldata.ste_rx_list,
 1397             BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 1398             &sc->ste_cdata.ste_rx_list_map);
 1399         if (error != 0) {
 1400                 device_printf(sc->ste_dev,
 1401                     "could not allocate DMA'able memory for Rx list.\n");
 1402                 goto fail;
 1403         }
 1404         ctx.ste_busaddr = 0;
 1405         error = bus_dmamap_load(sc->ste_cdata.ste_rx_list_tag,
 1406             sc->ste_cdata.ste_rx_list_map, sc->ste_ldata.ste_rx_list,
 1407             STE_RX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
 1408         if (error != 0 || ctx.ste_busaddr == 0) {
 1409                 device_printf(sc->ste_dev,
 1410                     "could not load DMA'able memory for Rx list.\n");
 1411                 goto fail;
 1412         }
 1413         sc->ste_ldata.ste_rx_list_paddr = ctx.ste_busaddr;
 1414 
 1415         /* Create DMA maps for Tx buffers. */
 1416         for (i = 0; i < STE_TX_LIST_CNT; i++) {
 1417                 txc = &sc->ste_cdata.ste_tx_chain[i];
 1418                 txc->ste_ptr = NULL;
 1419                 txc->ste_mbuf = NULL;
 1420                 txc->ste_next = NULL;
 1421                 txc->ste_phys = 0;
 1422                 txc->ste_map = NULL;
 1423                 error = bus_dmamap_create(sc->ste_cdata.ste_tx_tag, 0,
 1424                     &txc->ste_map);
 1425                 if (error != 0) {
 1426                         device_printf(sc->ste_dev,
 1427                             "could not create Tx dmamap.\n");
 1428                         goto fail;
 1429                 }
 1430         }
 1431         /* Create DMA maps for Rx buffers. */
 1432         if ((error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
 1433             &sc->ste_cdata.ste_rx_sparemap)) != 0) {
 1434                 device_printf(sc->ste_dev,
 1435                     "could not create spare Rx dmamap.\n");
 1436                 goto fail;
 1437         }
 1438         for (i = 0; i < STE_RX_LIST_CNT; i++) {
 1439                 rxc = &sc->ste_cdata.ste_rx_chain[i];
 1440                 rxc->ste_ptr = NULL;
 1441                 rxc->ste_mbuf = NULL;
 1442                 rxc->ste_next = NULL;
 1443                 rxc->ste_map = NULL;
 1444                 error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
 1445                     &rxc->ste_map);
 1446                 if (error != 0) {
 1447                         device_printf(sc->ste_dev,
 1448                             "could not create Rx dmamap.\n");
 1449                         goto fail;
 1450                 }
 1451         }
 1452 
 1453 fail:
 1454         return (error);
 1455 }
 1456 
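      /*
       * Tear down everything set up by the DMA allocation path above:
       * per-buffer maps, the spare Rx map, the descriptor-list memory and
       * maps, and finally the tags themselves.  Every pointer is checked
       * first, so this is safe to call from a partially failed attach.
       */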
 1457 static void
 1458 ste_dma_free(struct ste_softc *sc)
 1459 {
 1460         struct ste_chain *txc;
 1461         struct ste_chain_onefrag *rxc;
 1462         int i;
 1463 
 1464         /* Tx buffers. */
 1465         if (sc->ste_cdata.ste_tx_tag != NULL) {
 1466                 for (i = 0; i < STE_TX_LIST_CNT; i++) {
 1467                         txc = &sc->ste_cdata.ste_tx_chain[i];
 1468                         if (txc->ste_map != NULL) {
 1469                                 bus_dmamap_destroy(sc->ste_cdata.ste_tx_tag,
 1470                                     txc->ste_map);
 1471                                 txc->ste_map = NULL;
 1472                         }
 1473                 }
 1474                 bus_dma_tag_destroy(sc->ste_cdata.ste_tx_tag);
 1475                 sc->ste_cdata.ste_tx_tag = NULL;
 1476         }
 1477         /* Rx buffers. */
 1478         if (sc->ste_cdata.ste_rx_tag != NULL) {
 1479                 for (i = 0; i < STE_RX_LIST_CNT; i++) {
 1480                         rxc = &sc->ste_cdata.ste_rx_chain[i];
 1481                         if (rxc->ste_map != NULL) {
 1482                                 bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
 1483                                     rxc->ste_map);
 1484                                 rxc->ste_map = NULL;
 1485                         }
 1486                 }
 1487                 if (sc->ste_cdata.ste_rx_sparemap != NULL) {
 1488                         bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
 1489                             sc->ste_cdata.ste_rx_sparemap);
 1490                         sc->ste_cdata.ste_rx_sparemap = NULL;
 1491                 }
 1492                 bus_dma_tag_destroy(sc->ste_cdata.ste_rx_tag);
 1493                 sc->ste_cdata.ste_rx_tag = NULL;
 1494         }
 1495         /* Tx descriptor list. */
 1496         if (sc->ste_cdata.ste_tx_list_tag != NULL) {
 1497                 if (sc->ste_cdata.ste_tx_list_map != NULL)
 1498                         bus_dmamap_unload(sc->ste_cdata.ste_tx_list_tag,
 1499                             sc->ste_cdata.ste_tx_list_map);
 1500                 if (sc->ste_cdata.ste_tx_list_map != NULL &&
 1501                     sc->ste_ldata.ste_tx_list != NULL)
 1502                         bus_dmamem_free(sc->ste_cdata.ste_tx_list_tag,
 1503                             sc->ste_ldata.ste_tx_list,
 1504                             sc->ste_cdata.ste_tx_list_map);
 1505                 sc->ste_ldata.ste_tx_list = NULL;
 1506                 sc->ste_cdata.ste_tx_list_map = NULL;
 1507                 bus_dma_tag_destroy(sc->ste_cdata.ste_tx_list_tag);
 1508                 sc->ste_cdata.ste_tx_list_tag = NULL;
 1509         }
 1510         /* Rx descriptor list. */
 1511         if (sc->ste_cdata.ste_rx_list_tag != NULL) {
 1512                 if (sc->ste_cdata.ste_rx_list_map != NULL)
 1513                         bus_dmamap_unload(sc->ste_cdata.ste_rx_list_tag,
 1514                             sc->ste_cdata.ste_rx_list_map);
 1515                 if (sc->ste_cdata.ste_rx_list_map != NULL &&
 1516                     sc->ste_ldata.ste_rx_list != NULL)
 1517                         bus_dmamem_free(sc->ste_cdata.ste_rx_list_tag,
 1518                             sc->ste_ldata.ste_rx_list,
 1519                             sc->ste_cdata.ste_rx_list_map);
 1520                 sc->ste_ldata.ste_rx_list = NULL;
 1521                 sc->ste_cdata.ste_rx_list_map = NULL;
 1522                 bus_dma_tag_destroy(sc->ste_cdata.ste_rx_list_tag);
 1523                 sc->ste_cdata.ste_rx_list_tag = NULL;
 1524         }
 1525         if (sc->ste_cdata.ste_parent_tag != NULL) {
 1526                 bus_dma_tag_destroy(sc->ste_cdata.ste_parent_tag);
 1527                 sc->ste_cdata.ste_parent_tag = NULL;
 1528         }
 1529 }
 1530 
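      /*
       * Attach a fresh receive cluster to an Rx ring entry.  The mbuf is
       * loaded into the pre-allocated spare map first; only if that load
       * succeeds are the old buffer unloaded and the maps swapped, so a
       * failed allocation never leaves the ring entry without a usable
       * buffer.  The cluster is adjusted by ETHER_ALIGN so the IP header
       * ends up 32-bit aligned.
       */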
 1531 static int
 1532 ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *rxc)
 1533 {
 1534         struct mbuf *m;
 1535         bus_dma_segment_t segs[1];
 1536         bus_dmamap_t map;
 1537         int error, nsegs;
 1538 
 1539         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 1540         if (m == NULL)
 1541                 return (ENOBUFS);
 1542         m->m_len = m->m_pkthdr.len = MCLBYTES;
 1543         m_adj(m, ETHER_ALIGN);
 1544 
 1545         if ((error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_rx_tag,
 1546             sc->ste_cdata.ste_rx_sparemap, m, segs, &nsegs, 0)) != 0) {
 1547                 m_freem(m);
 1548                 return (error);
 1549         }
 1550         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1551 
 1552         if (rxc->ste_mbuf != NULL) {
 1553                 bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
 1554                     BUS_DMASYNC_POSTREAD);
 1555                 bus_dmamap_unload(sc->ste_cdata.ste_rx_tag, rxc->ste_map);
 1556         }
 1557         map = rxc->ste_map;
 1558         rxc->ste_map = sc->ste_cdata.ste_rx_sparemap;
 1559         sc->ste_cdata.ste_rx_sparemap = map;
 1560         bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
 1561             BUS_DMASYNC_PREREAD);
 1562         rxc->ste_mbuf = m;
 1563         rxc->ste_ptr->ste_status = 0;
 1564         rxc->ste_ptr->ste_frag.ste_addr = htole32(segs[0].ds_addr);
 1565         rxc->ste_ptr->ste_frag.ste_len = htole32(segs[0].ds_len |
 1566             STE_FRAG_LAST);
 1567         return (0);
 1568 }
 1569 
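      /*
       * Populate the Rx descriptor ring: give every entry a cluster via
       * ste_newbuf() and link the ste_next pointers into a circular list,
       * with the last descriptor pointing back at the first.
       */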
 1570 static int
 1571 ste_init_rx_list(struct ste_softc *sc)
 1572 {
 1573         struct ste_chain_data *cd;
 1574         struct ste_list_data *ld;
 1575         int error, i;
 1576 
 1577         sc->ste_int_rx_act = 0;
 1578         cd = &sc->ste_cdata;
 1579         ld = &sc->ste_ldata;
 1580         bzero(ld->ste_rx_list, STE_RX_LIST_SZ);
 1581         for (i = 0; i < STE_RX_LIST_CNT; i++) {
 1582                 cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
 1583                 error = ste_newbuf(sc, &cd->ste_rx_chain[i]);
 1584                 if (error != 0)
 1585                         return (error);
 1586                 if (i == (STE_RX_LIST_CNT - 1)) {
 1587                         cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[0];
 1588                         ld->ste_rx_list[i].ste_next =
 1589                             htole32(ld->ste_rx_list_paddr +
 1590                             (sizeof(struct ste_desc_onefrag) * 0));
 1591                 } else {
 1592                         cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[i + 1];
 1593                         ld->ste_rx_list[i].ste_next =
 1594                             htole32(ld->ste_rx_list_paddr +
 1595                             (sizeof(struct ste_desc_onefrag) * (i + 1)));
 1596                 }
 1597         }
 1598 
 1599         cd->ste_rx_head = &cd->ste_rx_chain[0];
 1600         bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
 1601             sc->ste_cdata.ste_rx_list_map,
 1602             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1603 
 1604         return (0);
 1605 }
 1606 
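      /*
       * Reset the Tx descriptor ring: clear the list, attach each software
       * chain entry to its descriptor, record the bus address of the
       * following descriptor (wrapping to the first) for later chaining,
       * and reset the producer/consumer state.
       */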
 1607 static void
 1608 ste_init_tx_list(struct ste_softc *sc)
 1609 {
 1610         struct ste_chain_data *cd;
 1611         struct ste_list_data *ld;
 1612         int i;
 1613 
 1614         cd = &sc->ste_cdata;
 1615         ld = &sc->ste_ldata;
 1616         bzero(ld->ste_tx_list, STE_TX_LIST_SZ);
 1617         for (i = 0; i < STE_TX_LIST_CNT; i++) {
 1618                 cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
 1619                 cd->ste_tx_chain[i].ste_mbuf = NULL;
 1620                 if (i == (STE_TX_LIST_CNT - 1)) {
 1621                         cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[0];
 1622                         cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO(
 1623                             ld->ste_tx_list_paddr +
 1624                             (sizeof(struct ste_desc) * 0)));
 1625                 } else {
 1626                         cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[i + 1];
 1627                         cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO(
 1628                             ld->ste_tx_list_paddr +
 1629                             (sizeof(struct ste_desc) * (i + 1))));
 1630                 }
 1631         }
 1632 
 1633         cd->ste_last_tx = NULL;
 1634         cd->ste_tx_prod = 0;
 1635         cd->ste_tx_cons = 0;
 1636         cd->ste_tx_cnt = 0;
 1637 
 1638         bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
 1639             sc->ste_cdata.ste_tx_list_map,
 1640             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1641 }
 1642 
 1643 static void
 1644 ste_init(void *xsc)
 1645 {
 1646         struct ste_softc *sc;
 1647 
 1648         sc = xsc;
 1649         STE_LOCK(sc);
 1650         ste_init_locked(sc);
 1651         STE_UNLOCK(sc);
 1652 }
 1653 
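      /*
       * Bring the interface up with the lock held: stop and reset the chip,
       * program the station address, set up the Rx/Tx rings and DMA
       * thresholds, point the Rx DMA engine at the ring, enable the MAC and
       * interrupts (unless polling is active), and start the tick callout.
       */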
 1654 static void
 1655 ste_init_locked(struct ste_softc *sc)
 1656 {
 1657         struct ifnet *ifp;
 1658         struct mii_data *mii;
 1659         uint8_t val;
 1660         int i;
 1661 
 1662         STE_LOCK_ASSERT(sc);
 1663         ifp = sc->ste_ifp;
 1664         mii = device_get_softc(sc->ste_miibus);
 1665 
 1666         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1667                 return;
 1668 
 1669         ste_stop(sc);
 1670         /* Reset the chip to a known state. */
 1671         ste_reset(sc);
 1672 
 1673         /* Init our MAC address */
 1674         for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
 1675                 CSR_WRITE_2(sc, STE_PAR0 + i,
 1676                     ((IF_LLADDR(sc->ste_ifp)[i] & 0xff) |
 1677                      IF_LLADDR(sc->ste_ifp)[i + 1] << 8));
 1678         }
 1679 
 1680         /* Init RX list */
 1681         if (ste_init_rx_list(sc) != 0) {
 1682                 device_printf(sc->ste_dev,
 1683                     "initialization failed: no memory for RX buffers\n");
 1684                 ste_stop(sc);
 1685                 return;
 1686         }
 1687 
 1688         /* Set RX polling interval */
 1689         CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);
 1690 
 1691         /* Init TX descriptors */
 1692         ste_init_tx_list(sc);
 1693 
 1694         /* Clear and disable WOL. */
 1695         val = CSR_READ_1(sc, STE_WAKE_EVENT);
 1696         val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB |
 1697             STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB);
 1698         CSR_WRITE_1(sc, STE_WAKE_EVENT, val);
 1699 
 1700         /* Set the TX freethresh value */
 1701         CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);
 1702 
 1703         /* Set the TX start threshold for best performance. */
 1704         CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
 1705 
 1706         /* Set the TX reclaim threshold. */
 1707         CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));
 1708 
 1709         /* Accept VLAN length packets */
 1710         CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
 1711 
 1712         /* Set up the RX filter. */
 1713         ste_rxfilter(sc);
 1714 
 1715         /* Load the address of the RX list. */
 1716         STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
 1717         ste_wait(sc);
 1718         CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
 1719             STE_ADDR_LO(sc->ste_ldata.ste_rx_list_paddr));
 1720         STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
 1721         STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
 1722 
 1723         /* Set TX polling interval (deferred until we send the first packet). */
 1724         CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
 1725 
 1726         /* Load address of the TX list */
 1727         STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
 1728         ste_wait(sc);
 1729         CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
 1730         STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
 1731         STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
 1732         ste_wait(sc);
 1733         /* Select 3.2us timer. */
 1734         STE_CLRBIT4(sc, STE_DMACTL, STE_DMACTL_COUNTDOWN_SPEED |
 1735             STE_DMACTL_COUNTDOWN_MODE);
 1736 
 1737         /* Enable receiver and transmitter */
 1738         CSR_WRITE_2(sc, STE_MACCTL0, 0);
 1739         CSR_WRITE_2(sc, STE_MACCTL1, 0);
 1740         STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
 1741         STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);
 1742 
 1743         /* Enable stats counters. */
 1744         STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
 1745         /* Clear stats counters. */
 1746         ste_stats_clear(sc);
 1747 
 1748         CSR_WRITE_2(sc, STE_COUNTDOWN, 0);
 1749         CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
 1750 #ifdef DEVICE_POLLING
 1751         /* Disable interrupts if we are polling. */
 1752         if (ifp->if_capenable & IFCAP_POLLING)
 1753                 CSR_WRITE_2(sc, STE_IMR, 0);
 1754         else
 1755 #endif
 1756         /* Enable interrupts. */
 1757         CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
 1758 
 1759         sc->ste_flags &= ~STE_FLAG_LINK;
 1760         /* Switch to the current media. */
 1761         mii_mediachg(mii);
 1762 
 1763         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 1764         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1765 
 1766         callout_reset(&sc->ste_callout, hz, ste_tick, sc);
 1767 }
 1768 
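      /*
       * Quiesce the hardware: mask interrupts, stall the DMA engines and
       * clear their list pointers, disable the Tx/Rx MAC and statistics
       * (polling for completion), then free any mbufs still held in the
       * Rx and Tx rings.
       */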
 1769 static void
 1770 ste_stop(struct ste_softc *sc)
 1771 {
 1772         struct ifnet *ifp;
 1773         struct ste_chain_onefrag *cur_rx;
 1774         struct ste_chain *cur_tx;
 1775         uint32_t val;
 1776         int i;
 1777 
 1778         STE_LOCK_ASSERT(sc);
 1779         ifp = sc->ste_ifp;
 1780 
 1781         callout_stop(&sc->ste_callout);
 1782         sc->ste_timer = 0;
 1783         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
 1784 
 1785         CSR_WRITE_2(sc, STE_IMR, 0);
 1786         CSR_WRITE_2(sc, STE_COUNTDOWN, 0);
 1787         /* Stop pending DMA. */
 1788         val = CSR_READ_4(sc, STE_DMACTL);
 1789         val |= STE_DMACTL_TXDMA_STALL | STE_DMACTL_RXDMA_STALL;
 1790         CSR_WRITE_4(sc, STE_DMACTL, val);
 1791         ste_wait(sc);
 1792         /* Disable auto-polling. */
 1793         CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 0);
 1794         CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
 1795         /* Nullify DMA address to stop any further DMA. */
 1796         CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, 0);
 1797         CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
 1798         /* Stop TX/RX MAC. */
 1799         val = CSR_READ_2(sc, STE_MACCTL1);
 1800         val |= STE_MACCTL1_TX_DISABLE | STE_MACCTL1_RX_DISABLE |
 1801             STE_MACCTL1_STATS_DISABLE;
 1802         CSR_WRITE_2(sc, STE_MACCTL1, val);
 1803         for (i = 0; i < STE_TIMEOUT; i++) {
 1804                 DELAY(10);
 1805                 if ((CSR_READ_2(sc, STE_MACCTL1) & (STE_MACCTL1_TX_DISABLE |
 1806                     STE_MACCTL1_RX_DISABLE | STE_MACCTL1_STATS_DISABLE)) == 0)
 1807                         break;
 1808         }
 1809         if (i == STE_TIMEOUT)
 1810                 device_printf(sc->ste_dev, "Stopping MAC timed out\n");
 1811         /* Acknowledge any pending interrupts. */
 1812         CSR_READ_2(sc, STE_ISR_ACK);
 1813         ste_stats_update(sc);
 1814 
 1815         for (i = 0; i < STE_RX_LIST_CNT; i++) {
 1816                 cur_rx = &sc->ste_cdata.ste_rx_chain[i];
 1817                 if (cur_rx->ste_mbuf != NULL) {
 1818                         bus_dmamap_sync(sc->ste_cdata.ste_rx_tag,
 1819                             cur_rx->ste_map, BUS_DMASYNC_POSTREAD);
 1820                         bus_dmamap_unload(sc->ste_cdata.ste_rx_tag,
 1821                             cur_rx->ste_map);
 1822                         m_freem(cur_rx->ste_mbuf);
 1823                         cur_rx->ste_mbuf = NULL;
 1824                 }
 1825         }
 1826 
 1827         for (i = 0; i < STE_TX_LIST_CNT; i++) {
 1828                 cur_tx = &sc->ste_cdata.ste_tx_chain[i];
 1829                 if (cur_tx->ste_mbuf != NULL) {
 1830                         bus_dmamap_sync(sc->ste_cdata.ste_tx_tag,
 1831                             cur_tx->ste_map, BUS_DMASYNC_POSTWRITE);
 1832                         bus_dmamap_unload(sc->ste_cdata.ste_tx_tag,
 1833                             cur_tx->ste_map);
 1834                         m_freem(cur_tx->ste_mbuf);
 1835                         cur_tx->ste_mbuf = NULL;
 1836                 }
 1837         }
 1838 }
 1839 
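      /*
       * Issue a global reset covering every functional block and poll until
       * the ASIC clears its reset-busy indication.
       */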
 1840 static void
 1841 ste_reset(struct ste_softc *sc)
 1842 {
 1843         uint32_t ctl;
 1844         int i;
 1845 
 1846         ctl = CSR_READ_4(sc, STE_ASICCTL);
 1847         ctl |= STE_ASICCTL_GLOBAL_RESET | STE_ASICCTL_RX_RESET |
 1848             STE_ASICCTL_TX_RESET | STE_ASICCTL_DMA_RESET |
 1849             STE_ASICCTL_FIFO_RESET | STE_ASICCTL_NETWORK_RESET |
 1850             STE_ASICCTL_AUTOINIT_RESET | STE_ASICCTL_HOST_RESET |
 1851             STE_ASICCTL_EXTRESET_RESET;
 1852         CSR_WRITE_4(sc, STE_ASICCTL, ctl);
 1853         CSR_READ_4(sc, STE_ASICCTL);
 1854         /*
 1855          * Because the controller also needs to access the EEPROM, the
 1856          * global reset can take up to 1ms to complete.
 1857          */
 1858         DELAY(1000);
 1859 
 1860         for (i = 0; i < STE_TIMEOUT; i++) {
 1861                 if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
 1862                         break;
 1863                 DELAY(10);
 1864         }
 1865 
 1866         if (i == STE_TIMEOUT)
 1867                 device_printf(sc->ste_dev, "global reset never completed\n");
 1868 }
 1869 
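      /*
       * Re-enable the transmitter after an error, retrying until the MAC
       * reports that the Tx enable has taken effect or the timeout expires.
       */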
 1870 static void
 1871 ste_restart_tx(struct ste_softc *sc)
 1872 {
 1873         uint16_t mac;
 1874         int i;
 1875 
 1876         for (i = 0; i < STE_TIMEOUT; i++) {
 1877                 mac = CSR_READ_2(sc, STE_MACCTL1);
 1878                 mac |= STE_MACCTL1_TX_ENABLE;
 1879                 CSR_WRITE_2(sc, STE_MACCTL1, mac);
 1880                 mac = CSR_READ_2(sc, STE_MACCTL1);
 1881                 if ((mac & STE_MACCTL1_TX_ENABLED) != 0)
 1882                         break;
 1883                 DELAY(10);
 1884         }
 1885 
 1886         if (i == STE_TIMEOUT)
 1887                 device_printf(sc->ste_dev, "starting Tx failed.\n");
 1888 }
 1889 
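      /*
       * Standard ifnet ioctl handler: interface flag changes, multicast
       * filter updates, media selection, and capability toggles (polling
       * and magic-packet WOL) are handled here; everything else is passed
       * on to ether_ioctl().
       */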
 1890 static int
 1891 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 1892 {
 1893         struct ste_softc *sc;
 1894         struct ifreq *ifr;
 1895         struct mii_data *mii;
 1896         int error = 0, mask;
 1897 
 1898         sc = ifp->if_softc;
 1899         ifr = (struct ifreq *)data;
 1900 
 1901         switch (command) {
 1902         case SIOCSIFFLAGS:
 1903                 STE_LOCK(sc);
 1904                 if ((ifp->if_flags & IFF_UP) != 0) {
 1905                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1906                             ((ifp->if_flags ^ sc->ste_if_flags) &
 1907                              (IFF_PROMISC | IFF_ALLMULTI)) != 0)
 1908                                 ste_rxfilter(sc);
 1909                         else
 1910                                 ste_init_locked(sc);
 1911                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1912                         ste_stop(sc);
 1913                 sc->ste_if_flags = ifp->if_flags;
 1914                 STE_UNLOCK(sc);
 1915                 break;
 1916         case SIOCADDMULTI:
 1917         case SIOCDELMULTI:
 1918                 STE_LOCK(sc);
 1919                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1920                         ste_rxfilter(sc);
 1921                 STE_UNLOCK(sc);
 1922                 break;
 1923         case SIOCGIFMEDIA:
 1924         case SIOCSIFMEDIA:
 1925                 mii = device_get_softc(sc->ste_miibus);
 1926                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
 1927                 break;
 1928         case SIOCSIFCAP:
 1929                 STE_LOCK(sc);
 1930                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 1931 #ifdef DEVICE_POLLING
 1932                 if ((mask & IFCAP_POLLING) != 0 &&
 1933                     (IFCAP_POLLING & ifp->if_capabilities) != 0) {
 1934                         ifp->if_capenable ^= IFCAP_POLLING;
 1935                         if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
 1936                                 error = ether_poll_register(ste_poll, ifp);
 1937                                 if (error != 0) {
 1938                                         STE_UNLOCK(sc);
 1939                                         break;
 1940                                 }
 1941                                 /* Disable interrupts. */
 1942                                 CSR_WRITE_2(sc, STE_IMR, 0);
 1943                         } else {
 1944                                 error = ether_poll_deregister(ifp);
 1945                                 /* Enable interrupts. */
 1946                                 CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
 1947                         }
 1948                 }
 1949 #endif /* DEVICE_POLLING */
 1950                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 1951                     (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
 1952                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 1953                 STE_UNLOCK(sc);
 1954                 break;
 1955         default:
 1956                 error = ether_ioctl(ifp, command, data);
 1957                 break;
 1958         }
 1959 
 1960         return (error);
 1961 }
 1962 
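      /*
       * Map an outgoing mbuf chain for DMA and fill in the Tx descriptor's
       * fragment list.  Chains needing more than STE_MAXFRAGS segments are
       * collapsed first; the final fragment is flagged with STE_FRAG_LAST,
       * and a Tx-complete interrupt is requested only on every
       * STE_TX_INTR_FRAMES-th frame to limit interrupt load.
       */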
 1963 static int
 1964 ste_encap(struct ste_softc *sc, struct mbuf **m_head, struct ste_chain *txc)
 1965 {
 1966         struct ste_frag *frag;
 1967         struct mbuf *m;
 1968         struct ste_desc *desc;
 1969         bus_dma_segment_t txsegs[STE_MAXFRAGS];
 1970         int error, i, nsegs;
 1971 
 1972         STE_LOCK_ASSERT(sc);
 1973         M_ASSERTPKTHDR((*m_head));
 1974 
 1975         error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
 1976             txc->ste_map, *m_head, txsegs, &nsegs, 0);
 1977         if (error == EFBIG) {
 1978                 m = m_collapse(*m_head, M_DONTWAIT, STE_MAXFRAGS);
 1979                 if (m == NULL) {
 1980                         m_freem(*m_head);
 1981                         *m_head = NULL;
 1982                         return (ENOMEM);
 1983                 }
 1984                 *m_head = m;
 1985                 error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
 1986                     txc->ste_map, *m_head, txsegs, &nsegs, 0);
 1987                 if (error != 0) {
 1988                         m_freem(*m_head);
 1989                         *m_head = NULL;
 1990                         return (error);
 1991                 }
 1992         } else if (error != 0)
 1993                 return (error);
 1994         if (nsegs == 0) {
 1995                 m_freem(*m_head);
 1996                 *m_head = NULL;
 1997                 return (EIO);
 1998         }
 1999         bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, txc->ste_map,
 2000             BUS_DMASYNC_PREWRITE);
 2001 
 2002         desc = txc->ste_ptr;
 2003         for (i = 0; i < nsegs; i++) {
 2004                 frag = &desc->ste_frags[i];
 2005                 frag->ste_addr = htole32(STE_ADDR_LO(txsegs[i].ds_addr));
 2006                 frag->ste_len = htole32(txsegs[i].ds_len);
 2007         }
 2008         desc->ste_frags[i - 1].ste_len |= htole32(STE_FRAG_LAST);
 2009         /*
 2010          * Because we use Tx polling we can't chain multiple Tx
 2011          * descriptors here; otherwise we would race with the controller.
 2012          */
 2013         desc->ste_next = 0;
 2014         if ((sc->ste_cdata.ste_tx_prod % STE_TX_INTR_FRAMES) == 0)
 2015                 desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS |
 2016                     STE_TXCTL_DMAINTR);
 2017         else
 2018                 desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS);
 2019         txc->ste_mbuf = *m_head;
 2020         STE_INC(sc->ste_cdata.ste_tx_prod, STE_TX_LIST_CNT);
 2021         sc->ste_cdata.ste_tx_cnt++;
 2022 
 2023         return (0);
 2024 }
 2025 
 2026 static void
 2027 ste_start(struct ifnet *ifp)
 2028 {
 2029         struct ste_softc *sc;
 2030 
 2031         sc = ifp->if_softc;
 2032         STE_LOCK(sc);
 2033         ste_start_locked(ifp);
 2034         STE_UNLOCK(sc);
 2035 }
 2036 
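      /*
       * Dequeue and map frames while descriptors are available, always
       * reserving one descriptor (see the comment below).  When no
       * descriptor is currently queued, the Tx DMA list pointer and polling
       * timer are reprogrammed; otherwise the new descriptor is appended by
       * pointing the previous descriptor's next field at it.
       */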
 2037 static void
 2038 ste_start_locked(struct ifnet *ifp)
 2039 {
 2040         struct ste_softc *sc;
 2041         struct ste_chain *cur_tx;
 2042         struct mbuf *m_head = NULL;
 2043         int enq;
 2044 
 2045         sc = ifp->if_softc;
 2046         STE_LOCK_ASSERT(sc);
 2047 
 2048         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2049             IFF_DRV_RUNNING || (sc->ste_flags & STE_FLAG_LINK) == 0)
 2050                 return;
 2051 
 2052         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
 2053                 if (sc->ste_cdata.ste_tx_cnt == STE_TX_LIST_CNT - 1) {
 2054                         /*
 2055                          * The controller may have a cached copy of the last
 2056                          * used next pointer, so we have to reserve one TFD
 2057                          * to avoid TFD overruns.
 2058                          */
 2059                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2060                         break;
 2061                 }
 2062                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2063                 if (m_head == NULL)
 2064                         break;
 2065                 cur_tx = &sc->ste_cdata.ste_tx_chain[sc->ste_cdata.ste_tx_prod];
 2066                 if (ste_encap(sc, &m_head, cur_tx) != 0) {
 2067                         if (m_head == NULL)
 2068                                 break;
 2069                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2070                         break;
 2071                 }
 2072                 if (sc->ste_cdata.ste_last_tx == NULL) {
 2073                         bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
 2074                             sc->ste_cdata.ste_tx_list_map,
 2075                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2076                         STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
 2077                         ste_wait(sc);
 2078                         CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
 2079                             STE_ADDR_LO(sc->ste_ldata.ste_tx_list_paddr));
 2080                         CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
 2081                         STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
 2082                         ste_wait(sc);
 2083                 } else {
 2084                         sc->ste_cdata.ste_last_tx->ste_ptr->ste_next =
 2085                             sc->ste_cdata.ste_last_tx->ste_phys;
 2086                         bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
 2087                             sc->ste_cdata.ste_tx_list_map,
 2088                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2089                 }
 2090                 sc->ste_cdata.ste_last_tx = cur_tx;
 2091 
 2092                 enq++;
 2093                 /*
 2094                  * If there's a BPF listener, bounce a copy of this frame
 2095                  * to it.
 2096                  */
 2097                 BPF_MTAP(ifp, m_head);
 2098         }
 2099 
 2100         if (enq > 0)
 2101                 sc->ste_timer = STE_TX_TIMEOUT;
 2102 }
 2103 
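      /*
       * Per-tick Tx watchdog: when the transmit timer expires without a
       * completion, log the timeout, reclaim whatever has finished, and
       * reinitialize the interface.
       */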
 2104 static void
 2105 ste_watchdog(struct ste_softc *sc)
 2106 {
 2107         struct ifnet *ifp;
 2108 
 2109         ifp = sc->ste_ifp;
 2110         STE_LOCK_ASSERT(sc);
 2111 
 2112         if (sc->ste_timer == 0 || --sc->ste_timer)
 2113                 return;
 2114 
 2115         ifp->if_oerrors++;
 2116         if_printf(ifp, "watchdog timeout\n");
 2117 
 2118         ste_txeof(sc);
 2119         ste_txeoc(sc);
 2120         ste_rxeof(sc, -1);
 2121         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2122         ste_init_locked(sc);
 2123 
 2124         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2125                 ste_start_locked(ifp);
 2126 }
 2127 
 2128 static int
 2129 ste_shutdown(device_t dev)
 2130 {
 2131 
 2132         return (ste_suspend(dev));
 2133 }
 2134 
 2135 static int
 2136 ste_suspend(device_t dev)
 2137 {
 2138         struct ste_softc *sc;
 2139 
 2140         sc = device_get_softc(dev);
 2141 
 2142         STE_LOCK(sc);
 2143         ste_stop(sc);
 2144         ste_setwol(sc);
 2145         STE_UNLOCK(sc);
 2146 
 2147         return (0);
 2148 }
 2149 
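      /*
       * Resume handler: clear and disable any PME that was armed for
       * wake-up during suspend, then reinitialize the interface if it was
       * administratively up.
       */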
 2150 static int
 2151 ste_resume(device_t dev)
 2152 {
 2153         struct ste_softc *sc;
 2154         struct ifnet *ifp;
 2155         int pmc;
 2156         uint16_t pmstat;
 2157 
 2158         sc = device_get_softc(dev);
 2159         STE_LOCK(sc);
 2160         if (pci_find_extcap(sc->ste_dev, PCIY_PMG, &pmc) == 0) {
 2161                 /* Disable PME and clear PME status. */
 2162                 pmstat = pci_read_config(sc->ste_dev,
 2163                     pmc + PCIR_POWER_STATUS, 2);
 2164                 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
 2165                         pmstat &= ~PCIM_PSTAT_PMEENABLE;
 2166                         pci_write_config(sc->ste_dev,
 2167                             pmc + PCIR_POWER_STATUS, pmstat, 2);
 2168                 }
 2169         }
 2170         ifp = sc->ste_ifp;
 2171         if ((ifp->if_flags & IFF_UP) != 0) {
 2172                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2173                 ste_init_locked(sc);
 2174         }
 2175         STE_UNLOCK(sc);
 2176 
 2177         return (0);
 2178 }
 2179 
 2180 #define STE_SYSCTL_STAT_ADD32(c, h, n, p, d)    \
 2181             SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
 2182 #define STE_SYSCTL_STAT_ADD64(c, h, n, p, d)    \
 2183             SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
 2184 
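      /*
       * Create the sysctl tree for this device: the int_rx_mod interrupt
       * moderation knob (initialized to its default and optionally
       * overridden from device hints) plus read-only Rx and Tx MAC
       * statistics nodes.
       */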
 2185 static void
 2186 ste_sysctl_node(struct ste_softc *sc)
 2187 {
 2188         struct sysctl_ctx_list *ctx;
 2189         struct sysctl_oid_list *child, *parent;
 2190         struct sysctl_oid *tree;
 2191         struct ste_hw_stats *stats;
 2192 
 2193         stats = &sc->ste_stats;
 2194         ctx = device_get_sysctl_ctx(sc->ste_dev);
 2195         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ste_dev));
 2196 
 2197         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_rx_mod",
 2198             CTLFLAG_RW, &sc->ste_int_rx_mod, 0, "ste RX interrupt moderation");
 2199         /* Pull in device tunables. */
 2200         sc->ste_int_rx_mod = STE_IM_RX_TIMER_DEFAULT;
 2201         resource_int_value(device_get_name(sc->ste_dev),
 2202             device_get_unit(sc->ste_dev), "int_rx_mod", &sc->ste_int_rx_mod);
 2203 
 2204         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
 2205             NULL, "STE statistics");
 2206         parent = SYSCTL_CHILDREN(tree);
 2207 
 2208         /* Rx statistics. */
 2209         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
 2210             NULL, "Rx MAC statistics");
 2211         child = SYSCTL_CHILDREN(tree);
 2212         STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
 2213             &stats->rx_bytes, "Good octets");
 2214         STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
 2215             &stats->rx_frames, "Good frames");
 2216         STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
 2217             &stats->rx_bcast_frames, "Good broadcast frames");
 2218         STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
 2219             &stats->rx_mcast_frames, "Good multicast frames");
 2220         STE_SYSCTL_STAT_ADD32(ctx, child, "lost_frames",
 2221             &stats->rx_lost_frames, "Lost frames");
 2222 
 2223         /* Tx statistics. */
 2224         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
 2225             NULL, "Tx MAC statistics");
 2226         child = SYSCTL_CHILDREN(tree);
 2227         STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
 2228             &stats->tx_bytes, "Good octets");
 2229         STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
 2230             &stats->tx_frames, "Good frames");
 2231         STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
 2232             &stats->tx_bcast_frames, "Good broadcast frames");
 2233         STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
 2234             &stats->tx_mcast_frames, "Good multicast frames");
 2235         STE_SYSCTL_STAT_ADD32(ctx, child, "carrier_errs",
 2236             &stats->tx_carrsense_errs, "Carrier sense errors");
 2237         STE_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
 2238             &stats->tx_single_colls, "Single collisions");
 2239         STE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
 2240             &stats->tx_multi_colls, "Multiple collisions");
 2241         STE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
 2242             &stats->tx_late_colls, "Late collisions");
 2243         STE_SYSCTL_STAT_ADD32(ctx, child, "defers",
 2244             &stats->tx_frames_defered, "Frames with deferrals");
 2245         STE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
 2246             &stats->tx_excess_defers, "Frames with excessive deferrals");
 2247         STE_SYSCTL_STAT_ADD32(ctx, child, "abort",
 2248             &stats->tx_abort, "Aborted frames due to excessive collisions");
 2249 }
 2250 
 2251 #undef STE_SYSCTL_STAT_ADD32
 2252 #undef STE_SYSCTL_STAT_ADD64
 2253 
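      /*
       * Arm (or disarm) wake-on-LAN before suspend: if the device has no
       * PCI power-management capability, WOL is simply disabled; otherwise
       * magic-packet wake events and PME are enabled only when
       * IFCAP_WOL_MAGIC is turned on.
       */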
 2254 static void
 2255 ste_setwol(struct ste_softc *sc)
 2256 {
 2257         struct ifnet *ifp;
 2258         uint16_t pmstat;
 2259         uint8_t val;
 2260         int pmc;
 2261 
 2262         STE_LOCK_ASSERT(sc);
 2263 
 2264         if (pci_find_extcap(sc->ste_dev, PCIY_PMG, &pmc) != 0) {
 2265                 /* Disable WOL. */
 2266                 CSR_READ_1(sc, STE_WAKE_EVENT);
 2267                 CSR_WRITE_1(sc, STE_WAKE_EVENT, 0);
 2268                 return;
 2269         }
 2270 
 2271         ifp = sc->ste_ifp;
 2272         val = CSR_READ_1(sc, STE_WAKE_EVENT);
 2273         val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB |
 2274             STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB);
 2275         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 2276                 val |= STE_WAKEEVENT_MAGICPKT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB;
 2277         CSR_WRITE_1(sc, STE_WAKE_EVENT, val);
 2278         /* Request PME. */
 2279         pmstat = pci_read_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, 2);
 2280         pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
 2281         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 2282                 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
 2283         pci_write_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
 2284 }
