FreeBSD/Linux Kernel Cross Reference
sys/dev/xl/if_xl.c


    1 /*-
    2  * Copyright (c) 1997, 1998, 1999
    3  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Bill Paul.
   16  * 4. Neither the name of the author nor the names of any co-contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   30  * THE POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD: releng/8.4/sys/dev/xl/if_xl.c 242909 2012-11-12 07:47:19Z dim $");
   35 
   36 /*
   37  * 3Com 3c90x Etherlink XL PCI NIC driver
   38  *
   39  * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
   40  * bus-master chips (3c90x cards and embedded controllers) including
   41  * the following:
   42  *
   43  * 3Com 3c900-TPO       10Mbps/RJ-45
   44  * 3Com 3c900-COMBO     10Mbps/RJ-45,AUI,BNC
   45  * 3Com 3c905-TX        10/100Mbps/RJ-45
   46  * 3Com 3c905-T4        10/100Mbps/RJ-45
   47  * 3Com 3c900B-TPO      10Mbps/RJ-45
   48  * 3Com 3c900B-COMBO    10Mbps/RJ-45,AUI,BNC
   49  * 3Com 3c900B-TPC      10Mbps/RJ-45,BNC
   50  * 3Com 3c900B-FL       10Mbps/Fiber-optic
   51  * 3Com 3c905B-COMBO    10/100Mbps/RJ-45,AUI,BNC
   52  * 3Com 3c905B-TX       10/100Mbps/RJ-45
   53  * 3Com 3c905B-FL/FX    10/100Mbps/Fiber-optic
   54  * 3Com 3c905C-TX       10/100Mbps/RJ-45 (Tornado ASIC)
   55  * 3Com 3c980-TX        10/100Mbps server adapter (Hurricane ASIC)
   56  * 3Com 3c980C-TX       10/100Mbps server adapter (Tornado ASIC)
   57  * 3Com 3cSOHO100-TX    10/100Mbps/RJ-45 (Hurricane ASIC)
   58  * 3Com 3c450-TX        10/100Mbps/RJ-45 (Tornado ASIC)
   59  * 3Com 3c555           10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
   60  * 3Com 3c556           10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
   61  * 3Com 3c556B          10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
   62  * 3Com 3c575TX         10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   63  * 3Com 3c575B          10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   64  * 3Com 3c575C          10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   65  * 3Com 3cxfem656       10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   66  * 3Com 3cxfem656b      10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   67  * 3Com 3cxfem656c      10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
   68  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
   69  * Dell on-board 3c920 10/100Mbps/RJ-45
   70  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
   71  * Dell Latitude laptop docking station embedded 3c905-TX
   72  *
   73  * Written by Bill Paul <wpaul@ctr.columbia.edu>
   74  * Electrical Engineering Department
   75  * Columbia University, New York City
   76  */
   77 /*
    78  * The 3c90x series chips use a bus-master DMA interface for transferring
    79  * packets to and from the controller chip. Some of the "vortex" cards
    80  * (3c59x) also supported a bus master mode; however, for those chips
   81  * you could only DMA packets to/from a contiguous memory buffer. For
   82  * transmission this would mean copying the contents of the queued mbuf
   83  * chain into an mbuf cluster and then DMAing the cluster. This extra
   84  * copy would sort of defeat the purpose of the bus master support for
   85  * any packet that doesn't fit into a single mbuf.
   86  *
   87  * By contrast, the 3c90x cards support a fragment-based bus master
   88  * mode where mbuf chains can be encapsulated using TX descriptors.
   89  * This is similar to other PCI chips such as the Texas Instruments
   90  * ThunderLAN and the Intel 82557/82558.
   91  *
   92  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
   93  * bus master chips because they maintain the old PIO interface for
   94  * backwards compatibility, but starting with the 3c905B and the
   95  * "cyclone" chips, the compatibility interface has been dropped.
   96  * Since using bus master DMA is a big win, we use this driver to
    97  * support the PCI "boomerang" chips, even though they also work with
    98  * the "vortex" driver, in order to obtain better performance.
   99  */
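
/*
 * The sketch below is illustrative only and is not part of this driver:
 * it shows, under assumed names, how a fragment-based transmit path can
 * encapsulate an mbuf chain.  busdma maps each piece of the chain to a
 * DMA segment, and each segment becomes one address/length pair in a TX
 * descriptor, so no copy into a contiguous cluster is needed.  The
 * example_frag structure and example_encap() below are hypothetical and
 * do not correspond to the definitions in if_xlreg.h; the real code also
 * flags the final fragment and chains descriptors, which is omitted here.
 */
#if 0
struct example_frag {
	uint32_t	frag_addr;	/* bus address of one segment */
	uint32_t	frag_len;	/* length of that segment */
};

static int
example_encap(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m,
    struct example_frag *frags, int maxfrags)
{
	bus_dma_segment_t	segs[63];
	int			error, i, nsegs;

	/* Walk the mbuf chain and emit one segment per contiguous piece. */
	error = bus_dmamap_load_mbuf_sg(tag, map, m, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);
	if (nsegs > maxfrags) {
		bus_dmamap_unload(tag, map);
		return (EFBIG);
	}

	/* Each segment becomes one fragment pointer in the descriptor. */
	for (i = 0; i < nsegs; i++) {
		frags[i].frag_addr = htole32(segs[i].ds_addr);
		frags[i].frag_len = htole32(segs[i].ds_len);
	}
	return (0);
}
#endif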
  100 
  101 #ifdef HAVE_KERNEL_OPTION_HEADERS
  102 #include "opt_device_polling.h"
  103 #endif
  104 
  105 #include <sys/param.h>
  106 #include <sys/systm.h>
  107 #include <sys/sockio.h>
  108 #include <sys/endian.h>
  109 #include <sys/mbuf.h>
  110 #include <sys/kernel.h>
  111 #include <sys/module.h>
  112 #include <sys/socket.h>
  113 #include <sys/taskqueue.h>
  114 
  115 #include <net/if.h>
  116 #include <net/if_arp.h>
  117 #include <net/ethernet.h>
  118 #include <net/if_dl.h>
  119 #include <net/if_media.h>
  120 #include <net/if_types.h>
  121 
  122 #include <net/bpf.h>
  123 
  124 #include <machine/bus.h>
  125 #include <machine/resource.h>
  126 #include <sys/bus.h>
  127 #include <sys/rman.h>
  128 
  129 #include <dev/mii/mii.h>
  130 #include <dev/mii/mii_bitbang.h>
  131 #include <dev/mii/miivar.h>
  132 
  133 #include <dev/pci/pcireg.h>
  134 #include <dev/pci/pcivar.h>
  135 
  136 MODULE_DEPEND(xl, pci, 1, 1, 1);
  137 MODULE_DEPEND(xl, ether, 1, 1, 1);
  138 MODULE_DEPEND(xl, miibus, 1, 1, 1);
  139 
  140 /* "device miibus" required.  See GENERIC if you get errors here. */
  141 #include "miibus_if.h"
  142 
  143 #include <dev/xl/if_xlreg.h>
  144 
  145 /*
  146  * TX Checksumming is disabled by default for two reasons:
  147  * - TX Checksumming will occasionally produce corrupt packets
  148  * - TX Checksumming seems to reduce performance
  149  *
   150  * Only 905B/C cards were reported to have this problem; it is possible
  151  * that later chips _may_ be immune.
  152  */
  153 #define XL905B_TXCSUM_BROKEN    1
  154 
  155 #ifdef XL905B_TXCSUM_BROKEN
  156 #define XL905B_CSUM_FEATURES    0
  157 #else
  158 #define XL905B_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
  159 #endif
  160 
  161 /*
  162  * Various supported device vendors/types and their names.
  163  */
  164 static const struct xl_type xl_devs[] = {
  165         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
  166                 "3Com 3c900-TPO Etherlink XL" },
  167         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
  168                 "3Com 3c900-COMBO Etherlink XL" },
  169         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
  170                 "3Com 3c905-TX Fast Etherlink XL" },
  171         { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
  172                 "3Com 3c905-T4 Fast Etherlink XL" },
  173         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
  174                 "3Com 3c900B-TPO Etherlink XL" },
  175         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
  176                 "3Com 3c900B-COMBO Etherlink XL" },
  177         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
  178                 "3Com 3c900B-TPC Etherlink XL" },
  179         { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
  180                 "3Com 3c900B-FL Etherlink XL" },
  181         { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
  182                 "3Com 3c905B-TX Fast Etherlink XL" },
  183         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
  184                 "3Com 3c905B-T4 Fast Etherlink XL" },
  185         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
  186                 "3Com 3c905B-FX/SC Fast Etherlink XL" },
  187         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
  188                 "3Com 3c905B-COMBO Fast Etherlink XL" },
  189         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
  190                 "3Com 3c905C-TX Fast Etherlink XL" },
  191         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
  192                 "3Com 3c920B-EMB Integrated Fast Etherlink XL" },
  193         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM,
  194                 "3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" },
  195         { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
  196                 "3Com 3c980 Fast Etherlink XL" },
  197         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
  198                 "3Com 3c980C Fast Etherlink XL" },
  199         { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
  200                 "3Com 3cSOHO100-TX OfficeConnect" },
  201         { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
  202                 "3Com 3c450-TX HomeConnect" },
  203         { TC_VENDORID, TC_DEVICEID_HURRICANE_555,
  204                 "3Com 3c555 Fast Etherlink XL" },
  205         { TC_VENDORID, TC_DEVICEID_HURRICANE_556,
  206                 "3Com 3c556 Fast Etherlink XL" },
  207         { TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
  208                 "3Com 3c556B Fast Etherlink XL" },
  209         { TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
  210                 "3Com 3c575TX Fast Etherlink XL" },
  211         { TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
  212                 "3Com 3c575B Fast Etherlink XL" },
  213         { TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
  214                 "3Com 3c575C Fast Etherlink XL" },
  215         { TC_VENDORID, TC_DEVICEID_HURRICANE_656,
  216                 "3Com 3c656 Fast Etherlink XL" },
  217         { TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
  218                 "3Com 3c656B Fast Etherlink XL" },
  219         { TC_VENDORID, TC_DEVICEID_TORNADO_656C,
  220                 "3Com 3c656C Fast Etherlink XL" },
  221         { 0, 0, NULL }
  222 };
  223 
  224 static int xl_probe(device_t);
  225 static int xl_attach(device_t);
  226 static int xl_detach(device_t);
  227 
  228 static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
  229 static void xl_tick(void *);
  230 static void xl_stats_update(struct xl_softc *);
  231 static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf **);
  232 static int xl_rxeof(struct xl_softc *);
  233 static void xl_rxeof_task(void *, int);
  234 static int xl_rx_resync(struct xl_softc *);
  235 static void xl_txeof(struct xl_softc *);
  236 static void xl_txeof_90xB(struct xl_softc *);
  237 static void xl_txeoc(struct xl_softc *);
  238 static void xl_intr(void *);
  239 static void xl_start(struct ifnet *);
  240 static void xl_start_locked(struct ifnet *);
  241 static void xl_start_90xB_locked(struct ifnet *);
  242 static int xl_ioctl(struct ifnet *, u_long, caddr_t);
  243 static void xl_init(void *);
  244 static void xl_init_locked(struct xl_softc *);
  245 static void xl_stop(struct xl_softc *);
  246 static int xl_watchdog(struct xl_softc *);
  247 static int xl_shutdown(device_t);
  248 static int xl_suspend(device_t);
  249 static int xl_resume(device_t);
  250 static void xl_setwol(struct xl_softc *);
  251 
  252 #ifdef DEVICE_POLLING
  253 static int xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
  254 static int xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
  255 #endif
  256 
  257 static int xl_ifmedia_upd(struct ifnet *);
  258 static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  259 
  260 static int xl_eeprom_wait(struct xl_softc *);
  261 static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
  262 
  263 static void xl_rxfilter(struct xl_softc *);
  264 static void xl_rxfilter_90x(struct xl_softc *);
  265 static void xl_rxfilter_90xB(struct xl_softc *);
  266 static void xl_setcfg(struct xl_softc *);
  267 static void xl_setmode(struct xl_softc *, int);
  268 static void xl_reset(struct xl_softc *);
  269 static int xl_list_rx_init(struct xl_softc *);
  270 static int xl_list_tx_init(struct xl_softc *);
  271 static int xl_list_tx_init_90xB(struct xl_softc *);
  272 static void xl_wait(struct xl_softc *);
  273 static void xl_mediacheck(struct xl_softc *);
  274 static void xl_choose_media(struct xl_softc *sc, int *media);
  275 static void xl_choose_xcvr(struct xl_softc *, int);
  276 static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int);
  277 #ifdef notdef
  278 static void xl_testpacket(struct xl_softc *);
  279 #endif
  280 
  281 static int xl_miibus_readreg(device_t, int, int);
  282 static int xl_miibus_writereg(device_t, int, int, int);
  283 static void xl_miibus_statchg(device_t);
  284 static void xl_miibus_mediainit(device_t);
  285 
  286 /*
  287  * MII bit-bang glue
  288  */
  289 static uint32_t xl_mii_bitbang_read(device_t);
  290 static void xl_mii_bitbang_write(device_t, uint32_t);
  291 
  292 static const struct mii_bitbang_ops xl_mii_bitbang_ops = {
  293         xl_mii_bitbang_read,
  294         xl_mii_bitbang_write,
  295         {
  296                 XL_MII_DATA,            /* MII_BIT_MDO */
  297                 XL_MII_DATA,            /* MII_BIT_MDI */
  298                 XL_MII_CLK,             /* MII_BIT_MDC */
  299                 XL_MII_DIR,             /* MII_BIT_DIR_HOST_PHY */
  300                 0,                      /* MII_BIT_DIR_PHY_HOST */
  301         }
  302 };
  303 
  304 static device_method_t xl_methods[] = {
  305         /* Device interface */
  306         DEVMETHOD(device_probe,         xl_probe),
  307         DEVMETHOD(device_attach,        xl_attach),
  308         DEVMETHOD(device_detach,        xl_detach),
  309         DEVMETHOD(device_shutdown,      xl_shutdown),
  310         DEVMETHOD(device_suspend,       xl_suspend),
  311         DEVMETHOD(device_resume,        xl_resume),
  312 
  313         /* MII interface */
  314         DEVMETHOD(miibus_readreg,       xl_miibus_readreg),
  315         DEVMETHOD(miibus_writereg,      xl_miibus_writereg),
  316         DEVMETHOD(miibus_statchg,       xl_miibus_statchg),
  317         DEVMETHOD(miibus_mediainit,     xl_miibus_mediainit),
  318 
  319         DEVMETHOD_END
  320 };
  321 
  322 static driver_t xl_driver = {
  323         "xl",
  324         xl_methods,
  325         sizeof(struct xl_softc)
  326 };
  327 
  328 static devclass_t xl_devclass;
  329 
  330 DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0);
  331 DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
  332 
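/*
 * bus_dmamap_load() callback: each descriptor ring is loaded as a single
 * segment, so simply record segs[0].ds_addr in the caller-supplied
 * variable; errors are handled by the caller via bus_dmamap_load()'s
 * return value.
 */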
  333 static void
  334 xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  335 {
  336         u_int32_t *paddr;
  337 
  338         paddr = arg;
  339         *paddr = segs->ds_addr;
  340 }
  341 
  342 /*
  343  * Murphy's law says that it's possible the chip can wedge and
  344  * the 'command in progress' bit may never clear. Hence, we wait
  345  * only a finite amount of time to avoid getting caught in an
  346  * infinite loop. Normally this delay routine would be a macro,
  347  * but it isn't called during normal operation so we can afford
  348  * to make it a function.
  349  */
  350 static void
  351 xl_wait(struct xl_softc *sc)
  352 {
  353         register int            i;
  354 
  355         for (i = 0; i < XL_TIMEOUT; i++) {
  356                 if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0)
  357                         break;
  358         }
  359 
  360         if (i == XL_TIMEOUT)
  361                 device_printf(sc->xl_dev, "command never completed!\n");
  362 }
  363 
  364 /*
  365  * MII access routines are provided for adapters with external
  366  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
  367  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
  368  * Note: if you don't perform the MDIO operations just right,
  369  * it's possible to end up with code that works correctly with
  370  * some chips/CPUs/processor speeds/bus speeds/etc but not
  371  * with others.
  372  */
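
/*
 * A background note, not taken from this file: the 3c90x register set is
 * arranged in numbered register "windows" that share one small I/O range,
 * and a window-select command must be issued before touching a windowed
 * register such as XL_W4_PHY_MGMT.  XL_SEL_WIN() is defined in if_xlreg.h;
 * assuming it looks roughly like the sketch below, the helpers that follow
 * only need to select window 4 once per MII transaction.
 */
#if 0
#define XL_SEL_WIN(x)	\
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_WINSEL | (x))
#endif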
  373 
  374 /*
  375  * Read the MII serial port for the MII bit-bang module.
  376  */
  377 static uint32_t
  378 xl_mii_bitbang_read(device_t dev)
  379 {
  380         struct xl_softc         *sc;
  381         uint32_t                val;
  382 
  383         sc = device_get_softc(dev);
  384 
  385         /* We're already in window 4. */
  386         val = CSR_READ_2(sc, XL_W4_PHY_MGMT);
  387         CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2,
  388             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  389 
  390         return (val);
  391 }
  392 
  393 /*
  394  * Write the MII serial port for the MII bit-bang module.
  395  */
  396 static void
  397 xl_mii_bitbang_write(device_t dev, uint32_t val)
  398 {
  399         struct xl_softc         *sc;
  400 
  401         sc = device_get_softc(dev);
  402 
  403         /* We're already in window 4. */
  404         CSR_WRITE_2(sc, XL_W4_PHY_MGMT, val);
  405         CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2,
  406             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  407 }
  408 
  409 static int
  410 xl_miibus_readreg(device_t dev, int phy, int reg)
  411 {
  412         struct xl_softc         *sc;
  413 
  414         sc = device_get_softc(dev);
  415 
   416         /* Select window 4. */
  417         XL_SEL_WIN(4);
  418 
  419         return (mii_bitbang_readreg(dev, &xl_mii_bitbang_ops, phy, reg));
  420 }
  421 
  422 static int
  423 xl_miibus_writereg(device_t dev, int phy, int reg, int data)
  424 {
  425         struct xl_softc         *sc;
  426 
  427         sc = device_get_softc(dev);
  428 
   429         /* Select window 4. */
  430         XL_SEL_WIN(4);
  431 
  432         mii_bitbang_writereg(dev, &xl_mii_bitbang_ops, phy, reg, data);
  433 
  434         return (0);
  435 }
  436 
  437 static void
  438 xl_miibus_statchg(device_t dev)
  439 {
  440         struct xl_softc         *sc;
  441         struct mii_data         *mii;
  442         uint8_t                 macctl;
  443 
  444         sc = device_get_softc(dev);
  445         mii = device_get_softc(sc->xl_miibus);
  446 
  447         xl_setcfg(sc);
  448 
  449         /* Set ASIC's duplex mode to match the PHY. */
  450         XL_SEL_WIN(3);
  451         macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
  452         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
  453                 macctl |= XL_MACCTRL_DUPLEX;
  454                 if (sc->xl_type == XL_TYPE_905B) {
  455                         if ((IFM_OPTIONS(mii->mii_media_active) &
  456                             IFM_ETH_RXPAUSE) != 0)
  457                                 macctl |= XL_MACCTRL_FLOW_CONTROL_ENB;
  458                         else
  459                                 macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB;
  460                 }
  461         } else {
  462                 macctl &= ~XL_MACCTRL_DUPLEX;
  463                 if (sc->xl_type == XL_TYPE_905B)
  464                         macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB;
  465         }
  466         CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
  467 }
  468 
  469 /*
  470  * Special support for the 3c905B-COMBO. This card has 10/100 support
   471  * plus BNC and AUI ports. This means we will have both a miibus attached
   472  * and some non-MII media settings. In order to allow this, we have to
  473  * add the extra media to the miibus's ifmedia struct, but we can't do
  474  * that during xl_attach() because the miibus hasn't been attached yet.
  475  * So instead, we wait until the miibus probe/attach is done, at which
   476  * point we will get a callback telling us that it's safe to add our
  477  * extra media.
  478  */
  479 static void
  480 xl_miibus_mediainit(device_t dev)
  481 {
  482         struct xl_softc         *sc;
  483         struct mii_data         *mii;
  484         struct ifmedia          *ifm;
  485 
  486         sc = device_get_softc(dev);
  487         mii = device_get_softc(sc->xl_miibus);
  488         ifm = &mii->mii_media;
  489 
  490         if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) {
  491                 /*
  492                  * Check for a 10baseFL board in disguise.
  493                  */
  494                 if (sc->xl_type == XL_TYPE_905B &&
  495                     sc->xl_media == XL_MEDIAOPT_10FL) {
  496                         if (bootverbose)
  497                                 device_printf(sc->xl_dev, "found 10baseFL\n");
  498                         ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL);
  499                         ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0,
  500                             NULL);
  501                         if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
  502                                 ifmedia_add(ifm,
  503                                     IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL);
  504                 } else {
  505                         if (bootverbose)
  506                                 device_printf(sc->xl_dev, "found AUI\n");
  507                         ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL);
  508                 }
  509         }
  510 
  511         if (sc->xl_media & XL_MEDIAOPT_BNC) {
  512                 if (bootverbose)
  513                         device_printf(sc->xl_dev, "found BNC\n");
  514                 ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL);
  515         }
  516 }
  517 
  518 /*
  519  * The EEPROM is slow: give it time to come ready after issuing
  520  * it a command.
  521  */
  522 static int
  523 xl_eeprom_wait(struct xl_softc *sc)
  524 {
  525         int                     i;
  526 
  527         for (i = 0; i < 100; i++) {
  528                 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
  529                         DELAY(162);
  530                 else
  531                         break;
  532         }
  533 
  534         if (i == 100) {
  535                 device_printf(sc->xl_dev, "eeprom failed to come ready\n");
  536                 return (1);
  537         }
  538 
  539         return (0);
  540 }
  541 
  542 /*
  543  * Read a sequence of words from the EEPROM. Note that ethernet address
  544  * data is stored in the EEPROM in network byte order.
  545  */
  546 static int
  547 xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
  548 {
  549         int                     err = 0, i;
  550         u_int16_t               word = 0, *ptr;
  551 
  552 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
  553 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
  554         /*
  555          * XXX: WARNING! DANGER!
  556          * It's easy to accidentally overwrite the rom content!
  557          * Note: the 3c575 uses 8bit EEPROM offsets.
  558          */
  559         XL_SEL_WIN(0);
  560 
  561         if (xl_eeprom_wait(sc))
  562                 return (1);
  563 
  564         if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
  565                 off += 0x30;
  566 
  567         for (i = 0; i < cnt; i++) {
  568                 if (sc->xl_flags & XL_FLAG_8BITROM)
  569                         CSR_WRITE_2(sc, XL_W0_EE_CMD,
  570                             XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
  571                 else
  572                         CSR_WRITE_2(sc, XL_W0_EE_CMD,
  573                             XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
  574                 err = xl_eeprom_wait(sc);
  575                 if (err)
  576                         break;
  577                 word = CSR_READ_2(sc, XL_W0_EE_DATA);
  578                 ptr = (u_int16_t *)(dest + (i * 2));
  579                 if (swap)
  580                         *ptr = ntohs(word);
  581                 else
  582                         *ptr = word;
  583         }
  584 
  585         return (err ? 1 : 0);
  586 }
  587 
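/*
 * Program the receive filter, dispatching to the hash-filter capable
 * routine on 905B-class chips and to the simpler one on older chips.
 */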
  588 static void
  589 xl_rxfilter(struct xl_softc *sc)
  590 {
  591 
  592         if (sc->xl_type == XL_TYPE_905B)
  593                 xl_rxfilter_90xB(sc);
  594         else
  595                 xl_rxfilter_90x(sc);
  596 }
  597 
  598 /*
  599  * NICs older than the 3c905B have only one multicast option, which
  600  * is to enable reception of all multicast frames.
  601  */
  602 static void
  603 xl_rxfilter_90x(struct xl_softc *sc)
  604 {
  605         struct ifnet            *ifp;
  606         struct ifmultiaddr      *ifma;
  607         u_int8_t                rxfilt;
  608 
  609         XL_LOCK_ASSERT(sc);
  610 
  611         ifp = sc->xl_ifp;
  612 
  613         XL_SEL_WIN(5);
  614         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
  615         rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
  616             XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);
  617 
  618         /* Set the individual bit to receive frames for this host only. */
  619         rxfilt |= XL_RXFILTER_INDIVIDUAL;
  620         /* Set capture broadcast bit to capture broadcast frames. */
  621         if (ifp->if_flags & IFF_BROADCAST)
  622                 rxfilt |= XL_RXFILTER_BROADCAST;
  623 
  624         /* If we want promiscuous mode, set the allframes bit. */
  625         if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
  626                 if (ifp->if_flags & IFF_PROMISC)
  627                         rxfilt |= XL_RXFILTER_ALLFRAMES;
  628                 if (ifp->if_flags & IFF_ALLMULTI)
  629                         rxfilt |= XL_RXFILTER_ALLMULTI;
  630         } else {
  631                 if_maddr_rlock(ifp);
  632                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  633                         if (ifma->ifma_addr->sa_family != AF_LINK)
  634                                 continue;
  635                         rxfilt |= XL_RXFILTER_ALLMULTI;
  636                         break;
  637                 }
  638                 if_maddr_runlock(ifp);
  639         }
  640 
  641         CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
  642         XL_SEL_WIN(7);
  643 }
  644 
  645 /*
  646  * 3c905B adapters have a hash filter that we can program.
  647  */
  648 static void
  649 xl_rxfilter_90xB(struct xl_softc *sc)
  650 {
  651         struct ifnet            *ifp;
  652         struct ifmultiaddr      *ifma;
  653         int                     i, mcnt;
  654         u_int16_t               h;
  655         u_int8_t                rxfilt;
  656 
  657         XL_LOCK_ASSERT(sc);
  658 
  659         ifp = sc->xl_ifp;
  660 
  661         XL_SEL_WIN(5);
  662         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
  663         rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
  664             XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
  665             XL_RXFILTER_MULTIHASH);
  666 
  667         /* Set the individual bit to receive frames for this host only. */
  668         rxfilt |= XL_RXFILTER_INDIVIDUAL;
  669         /* Set capture broadcast bit to capture broadcast frames. */
  670         if (ifp->if_flags & IFF_BROADCAST)
  671                 rxfilt |= XL_RXFILTER_BROADCAST;
  672 
  673         /* If we want promiscuous mode, set the allframes bit. */
  674         if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
  675                 if (ifp->if_flags & IFF_PROMISC)
  676                         rxfilt |= XL_RXFILTER_ALLFRAMES;
  677                 if (ifp->if_flags & IFF_ALLMULTI)
  678                         rxfilt |= XL_RXFILTER_ALLMULTI;
  679         } else {
  680                 /* First, zot all the existing hash bits. */
  681                 for (i = 0; i < XL_HASHFILT_SIZE; i++)
  682                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH | i);
  683 
  684                 /* Now program new ones. */
  685                 mcnt = 0;
  686                 if_maddr_rlock(ifp);
  687                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  688                         if (ifma->ifma_addr->sa_family != AF_LINK)
  689                                 continue;
  690                         /*
  691                          * Note: the 3c905B currently only supports a 64-bit
  692                          * hash table, which means we really only need 6 bits,
  693                          * but the manual indicates that future chip revisions
  694                          * will have a 256-bit hash table, hence the routine
  695                          * is set up to calculate 8 bits of position info in
  696                          * case we need it some day.
  697                          * Note II, The Sequel: _CURRENT_ versions of the
  698                          * 3c905B have a 256 bit hash table. This means we have
  699                          * to use all 8 bits regardless.  On older cards, the
  700                          * upper 2 bits will be ignored. Grrrr....
  701                          */
  702                         h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  703                             ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
  704                         CSR_WRITE_2(sc, XL_COMMAND,
  705                             h | XL_CMD_RX_SET_HASH | XL_HASH_SET);
  706                         mcnt++;
  707                 }
  708                 if_maddr_runlock(ifp);
  709                 if (mcnt > 0)
  710                         rxfilt |= XL_RXFILTER_MULTIHASH;
  711         }
  712 
  713         CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
  714         XL_SEL_WIN(7);
  715 }
  716 
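/*
 * Point the internal-config register's transceiver-select field at the
 * MII (for MII or 100baseT4 media) or at auto-select (for 10/100baseTX),
 * then make sure the coax transceiver is stopped.
 */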
  717 static void
  718 xl_setcfg(struct xl_softc *sc)
  719 {
  720         u_int32_t               icfg;
  721 
  722         /*XL_LOCK_ASSERT(sc);*/
  723 
  724         XL_SEL_WIN(3);
  725         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
  726         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  727         if (sc->xl_media & XL_MEDIAOPT_MII ||
  728                 sc->xl_media & XL_MEDIAOPT_BT4)
  729                 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
  730         if (sc->xl_media & XL_MEDIAOPT_BTX)
  731                 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
  732 
  733         CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
  734         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
  735 }
  736 
  737 static void
  738 xl_setmode(struct xl_softc *sc, int media)
  739 {
  740         u_int32_t               icfg;
  741         u_int16_t               mediastat;
  742         char                    *pmsg = "", *dmsg = "";
  743 
  744         XL_LOCK_ASSERT(sc);
  745 
  746         XL_SEL_WIN(4);
  747         mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
  748         XL_SEL_WIN(3);
  749         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
  750 
  751         if (sc->xl_media & XL_MEDIAOPT_BT) {
  752                 if (IFM_SUBTYPE(media) == IFM_10_T) {
  753                         pmsg = "10baseT transceiver";
  754                         sc->xl_xcvr = XL_XCVR_10BT;
  755                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  756                         icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
  757                         mediastat |= XL_MEDIASTAT_LINKBEAT |
  758                             XL_MEDIASTAT_JABGUARD;
  759                         mediastat &= ~XL_MEDIASTAT_SQEENB;
  760                 }
  761         }
  762 
  763         if (sc->xl_media & XL_MEDIAOPT_BFX) {
  764                 if (IFM_SUBTYPE(media) == IFM_100_FX) {
  765                         pmsg = "100baseFX port";
  766                         sc->xl_xcvr = XL_XCVR_100BFX;
  767                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  768                         icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
  769                         mediastat |= XL_MEDIASTAT_LINKBEAT;
  770                         mediastat &= ~XL_MEDIASTAT_SQEENB;
  771                 }
  772         }
  773 
  774         if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
  775                 if (IFM_SUBTYPE(media) == IFM_10_5) {
  776                         pmsg = "AUI port";
  777                         sc->xl_xcvr = XL_XCVR_AUI;
  778                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  779                         icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
  780                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  781                             XL_MEDIASTAT_JABGUARD);
   782                         mediastat |= XL_MEDIASTAT_SQEENB;
  783                 }
  784                 if (IFM_SUBTYPE(media) == IFM_10_FL) {
  785                         pmsg = "10baseFL transceiver";
  786                         sc->xl_xcvr = XL_XCVR_AUI;
  787                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  788                         icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
  789                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  790                             XL_MEDIASTAT_JABGUARD);
   791                         mediastat |= XL_MEDIASTAT_SQEENB;
  792                 }
  793         }
  794 
  795         if (sc->xl_media & XL_MEDIAOPT_BNC) {
  796                 if (IFM_SUBTYPE(media) == IFM_10_2) {
   797                         pmsg = "BNC port";
  798                         sc->xl_xcvr = XL_XCVR_COAX;
  799                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  800                         icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
  801                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  802                             XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB);
  803                 }
  804         }
  805 
  806         if ((media & IFM_GMASK) == IFM_FDX ||
  807                         IFM_SUBTYPE(media) == IFM_100_FX) {
  808                 dmsg = "full";
  809                 XL_SEL_WIN(3);
  810                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
  811         } else {
  812                 dmsg = "half";
  813                 XL_SEL_WIN(3);
  814                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
  815                         (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
  816         }
  817 
  818         if (IFM_SUBTYPE(media) == IFM_10_2)
  819                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
  820         else
  821                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
  822 
  823         CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
  824         XL_SEL_WIN(4);
  825         CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
  826 
  827         DELAY(800);
  828         XL_SEL_WIN(7);
  829 
  830         device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg);
  831 }
  832 
  833 static void
  834 xl_reset(struct xl_softc *sc)
  835 {
  836         register int            i;
  837 
  838         XL_LOCK_ASSERT(sc);
  839 
  840         XL_SEL_WIN(0);
  841         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
  842             ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
  843              XL_RESETOPT_DISADVFD:0));
  844 
  845         /*
  846          * If we're using memory mapped register mode, pause briefly
  847          * after issuing the reset command before trying to access any
  848          * other registers. With my 3c575C cardbus card, failing to do
  849          * this results in the system locking up while trying to poll
  850          * the command busy bit in the status register.
  851          */
  852         if (sc->xl_flags & XL_FLAG_USE_MMIO)
  853                 DELAY(100000);
  854 
  855         for (i = 0; i < XL_TIMEOUT; i++) {
  856                 DELAY(10);
  857                 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
  858                         break;
  859         }
  860 
  861         if (i == XL_TIMEOUT)
  862                 device_printf(sc->xl_dev, "reset didn't complete\n");
  863 
  864         /* Reset TX and RX. */
  865         /* Note: the RX reset takes an absurd amount of time
  866          * on newer versions of the Tornado chips such as those
  867          * on the 3c905CX and newer 3c908C cards. We wait an
  868          * extra amount of time so that xl_wait() doesn't complain
  869          * and annoy the users.
  870          */
  871         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
  872         DELAY(100000);
  873         xl_wait(sc);
  874         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
  875         xl_wait(sc);
  876 
  877         if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
  878             sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
  879                 XL_SEL_WIN(2);
  880                 CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS,
  881                     CSR_READ_2(sc, XL_W2_RESET_OPTIONS) |
  882                     ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ?
  883                     XL_RESETOPT_INVERT_LED : 0) |
  884                     ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ?
  885                     XL_RESETOPT_INVERT_MII : 0));
  886         }
  887 
  888         /* Wait a little while for the chip to get its brains in order. */
  889         DELAY(100000);
  890 }
  891 
  892 /*
  893  * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
  894  * IDs against our list and return a device name if we find a match.
  895  */
  896 static int
  897 xl_probe(device_t dev)
  898 {
  899         const struct xl_type    *t;
  900 
  901         t = xl_devs;
  902 
  903         while (t->xl_name != NULL) {
  904                 if ((pci_get_vendor(dev) == t->xl_vid) &&
  905                     (pci_get_device(dev) == t->xl_did)) {
  906                         device_set_desc(dev, t->xl_name);
  907                         return (BUS_PROBE_DEFAULT);
  908                 }
  909                 t++;
  910         }
  911 
  912         return (ENXIO);
  913 }
  914 
  915 /*
  916  * This routine is a kludge to work around possible hardware faults
  917  * or manufacturing defects that can cause the media options register
  918  * (or reset options register, as it's called for the first generation
  919  * 3c90x adapters) to return an incorrect result. I have encountered
  920  * one Dell Latitude laptop docking station with an integrated 3c905-TX
  921  * which doesn't have any of the 'mediaopt' bits set. This screws up
  922  * the attach routine pretty badly because it doesn't know what media
  923  * to look for. If we find ourselves in this predicament, this routine
  924  * will try to guess the media options values and warn the user of a
  925  * possible manufacturing defect with his adapter/system/whatever.
  926  */
  927 static void
  928 xl_mediacheck(struct xl_softc *sc)
  929 {
  930 
  931         /*
  932          * If some of the media options bits are set, assume they are
  933          * correct. If not, try to figure it out down below.
  934          * XXX I should check for 10baseFL, but I don't have an adapter
  935          * to test with.
  936          */
  937         if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
  938                 /*
  939                  * Check the XCVR value. If it's not in the normal range
  940                  * of values, we need to fake it up here.
  941                  */
  942                 if (sc->xl_xcvr <= XL_XCVR_AUTO)
  943                         return;
  944                 else {
  945                         device_printf(sc->xl_dev,
  946                             "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr);
  947                         device_printf(sc->xl_dev,
  948                             "choosing new default based on card type\n");
  949                 }
  950         } else {
  951                 if (sc->xl_type == XL_TYPE_905B &&
  952                     sc->xl_media & XL_MEDIAOPT_10FL)
  953                         return;
  954                 device_printf(sc->xl_dev,
  955 "WARNING: no media options bits set in the media options register!!\n");
  956                 device_printf(sc->xl_dev,
  957 "this could be a manufacturing defect in your adapter or system\n");
  958                 device_printf(sc->xl_dev,
  959 "attempting to guess media type; you should probably consult your vendor\n");
  960         }
  961 
  962         xl_choose_xcvr(sc, 1);
  963 }
  964 
  965 static void
  966 xl_choose_xcvr(struct xl_softc *sc, int verbose)
  967 {
  968         u_int16_t               devid;
  969 
  970         /*
  971          * Read the device ID from the EEPROM.
  972          * This is what's loaded into the PCI device ID register, so it has
   973          * to be correct; otherwise we wouldn't have gotten this far.
  974          */
  975         xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
  976 
  977         switch (devid) {
  978         case TC_DEVICEID_BOOMERANG_10BT:        /* 3c900-TPO */
  979         case TC_DEVICEID_KRAKATOA_10BT:         /* 3c900B-TPO */
  980                 sc->xl_media = XL_MEDIAOPT_BT;
  981                 sc->xl_xcvr = XL_XCVR_10BT;
  982                 if (verbose)
  983                         device_printf(sc->xl_dev,
  984                             "guessing 10BaseT transceiver\n");
  985                 break;
  986         case TC_DEVICEID_BOOMERANG_10BT_COMBO:  /* 3c900-COMBO */
  987         case TC_DEVICEID_KRAKATOA_10BT_COMBO:   /* 3c900B-COMBO */
  988                 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
  989                 sc->xl_xcvr = XL_XCVR_10BT;
  990                 if (verbose)
  991                         device_printf(sc->xl_dev,
  992                             "guessing COMBO (AUI/BNC/TP)\n");
  993                 break;
  994         case TC_DEVICEID_KRAKATOA_10BT_TPC:     /* 3c900B-TPC */
  995                 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
  996                 sc->xl_xcvr = XL_XCVR_10BT;
  997                 if (verbose)
  998                         device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n");
  999                 break;
 1000         case TC_DEVICEID_CYCLONE_10FL:          /* 3c900B-FL */
 1001                 sc->xl_media = XL_MEDIAOPT_10FL;
 1002                 sc->xl_xcvr = XL_XCVR_AUI;
 1003                 if (verbose)
 1004                         device_printf(sc->xl_dev, "guessing 10baseFL\n");
 1005                 break;
 1006         case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
 1007         case TC_DEVICEID_HURRICANE_555:         /* 3c555 */
 1008         case TC_DEVICEID_HURRICANE_556:         /* 3c556 */
 1009         case TC_DEVICEID_HURRICANE_556B:        /* 3c556B */
 1010         case TC_DEVICEID_HURRICANE_575A:        /* 3c575TX */
 1011         case TC_DEVICEID_HURRICANE_575B:        /* 3c575B */
 1012         case TC_DEVICEID_HURRICANE_575C:        /* 3c575C */
 1013         case TC_DEVICEID_HURRICANE_656:         /* 3c656 */
 1014         case TC_DEVICEID_HURRICANE_656B:        /* 3c656B */
 1015         case TC_DEVICEID_TORNADO_656C:          /* 3c656C */
 1016         case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
 1017         case TC_DEVICEID_TORNADO_10_100BT_920B_WNM:     /* 3c920B-EMB-WNM */
 1018                 sc->xl_media = XL_MEDIAOPT_MII;
 1019                 sc->xl_xcvr = XL_XCVR_MII;
 1020                 if (verbose)
 1021                         device_printf(sc->xl_dev, "guessing MII\n");
 1022                 break;
 1023         case TC_DEVICEID_BOOMERANG_100BT4:      /* 3c905-T4 */
 1024         case TC_DEVICEID_CYCLONE_10_100BT4:     /* 3c905B-T4 */
 1025                 sc->xl_media = XL_MEDIAOPT_BT4;
 1026                 sc->xl_xcvr = XL_XCVR_MII;
 1027                 if (verbose)
 1028                         device_printf(sc->xl_dev, "guessing 100baseT4/MII\n");
 1029                 break;
 1030         case TC_DEVICEID_HURRICANE_10_100BT:    /* 3c905B-TX */
 1031         case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */
 1032         case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */
 1033         case TC_DEVICEID_HURRICANE_SOHO100TX:   /* 3cSOHO100-TX */
 1034         case TC_DEVICEID_TORNADO_10_100BT:      /* 3c905C-TX */
 1035         case TC_DEVICEID_TORNADO_HOMECONNECT:   /* 3c450-TX */
 1036                 sc->xl_media = XL_MEDIAOPT_BTX;
 1037                 sc->xl_xcvr = XL_XCVR_AUTO;
 1038                 if (verbose)
 1039                         device_printf(sc->xl_dev, "guessing 10/100 internal\n");
 1040                 break;
 1041         case TC_DEVICEID_CYCLONE_10_100_COMBO:  /* 3c905B-COMBO */
 1042                 sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
 1043                 sc->xl_xcvr = XL_XCVR_AUTO;
 1044                 if (verbose)
 1045                         device_printf(sc->xl_dev,
 1046                             "guessing 10/100 plus BNC/AUI\n");
 1047                 break;
 1048         default:
 1049                 device_printf(sc->xl_dev,
 1050                     "unknown device ID: %x -- defaulting to 10baseT\n", devid);
 1051                 sc->xl_media = XL_MEDIAOPT_BT;
 1052                 break;
 1053         }
 1054 }
 1055 
 1056 /*
 1057  * Attach the interface. Allocate softc structures, do ifmedia
 1058  * setup and ethernet/BPF attach.
 1059  */
 1060 static int
 1061 xl_attach(device_t dev)
 1062 {
 1063         u_char                  eaddr[ETHER_ADDR_LEN];
 1064         u_int16_t               sinfo2, xcvr[2];
 1065         struct xl_softc         *sc;
 1066         struct ifnet            *ifp;
 1067         int                     media, pmcap;
 1068         int                     error = 0, phy, rid, res, unit;
 1069         uint16_t                did;
 1070 
 1071         sc = device_get_softc(dev);
 1072         sc->xl_dev = dev;
 1073 
 1074         unit = device_get_unit(dev);
 1075 
 1076         mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1077             MTX_DEF);
 1078         ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
 1079 
 1080         did = pci_get_device(dev);
 1081 
 1082         sc->xl_flags = 0;
 1083         if (did == TC_DEVICEID_HURRICANE_555)
 1084                 sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
 1085         if (did == TC_DEVICEID_HURRICANE_556 ||
 1086             did == TC_DEVICEID_HURRICANE_556B)
 1087                 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
 1088                     XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
 1089                     XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
 1090         if (did == TC_DEVICEID_HURRICANE_555 ||
 1091             did == TC_DEVICEID_HURRICANE_556)
 1092                 sc->xl_flags |= XL_FLAG_8BITROM;
 1093         if (did == TC_DEVICEID_HURRICANE_556B)
 1094                 sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
 1095 
 1096         if (did == TC_DEVICEID_HURRICANE_575B ||
 1097             did == TC_DEVICEID_HURRICANE_575C ||
 1098             did == TC_DEVICEID_HURRICANE_656B ||
 1099             did == TC_DEVICEID_TORNADO_656C)
 1100                 sc->xl_flags |= XL_FLAG_FUNCREG;
 1101         if (did == TC_DEVICEID_HURRICANE_575A ||
 1102             did == TC_DEVICEID_HURRICANE_575B ||
 1103             did == TC_DEVICEID_HURRICANE_575C ||
 1104             did == TC_DEVICEID_HURRICANE_656B ||
 1105             did == TC_DEVICEID_TORNADO_656C)
 1106                 sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 |
 1107                   XL_FLAG_8BITROM;
 1108         if (did == TC_DEVICEID_HURRICANE_656)
 1109                 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
 1110         if (did == TC_DEVICEID_HURRICANE_575B)
 1111                 sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
 1112         if (did == TC_DEVICEID_HURRICANE_575C)
 1113                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
 1114         if (did == TC_DEVICEID_TORNADO_656C)
 1115                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
 1116         if (did == TC_DEVICEID_HURRICANE_656 ||
 1117             did == TC_DEVICEID_HURRICANE_656B)
 1118                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
 1119                     XL_FLAG_INVERT_LED_PWR;
 1120         if (did == TC_DEVICEID_TORNADO_10_100BT_920B ||
 1121             did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM)
 1122                 sc->xl_flags |= XL_FLAG_PHYOK;
 1123 
 1124         switch (did) {
 1125         case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
 1126         case TC_DEVICEID_HURRICANE_575A:
 1127         case TC_DEVICEID_HURRICANE_575B:
 1128         case TC_DEVICEID_HURRICANE_575C:
 1129                 sc->xl_flags |= XL_FLAG_NO_MMIO;
 1130                 break;
 1131         default:
 1132                 break;
 1133         }
 1134 
 1135         /*
 1136          * Map control/status registers.
 1137          */
 1138         pci_enable_busmaster(dev);
 1139 
 1140         if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
 1141                 rid = XL_PCI_LOMEM;
 1142                 res = SYS_RES_MEMORY;
 1143 
 1144                 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
 1145         }
 1146 
 1147         if (sc->xl_res != NULL) {
 1148                 sc->xl_flags |= XL_FLAG_USE_MMIO;
 1149                 if (bootverbose)
 1150                         device_printf(dev, "using memory mapped I/O\n");
 1151         } else {
 1152                 rid = XL_PCI_LOIO;
 1153                 res = SYS_RES_IOPORT;
 1154                 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
 1155                 if (sc->xl_res == NULL) {
 1156                         device_printf(dev, "couldn't map ports/memory\n");
 1157                         error = ENXIO;
 1158                         goto fail;
 1159                 }
 1160                 if (bootverbose)
 1161                         device_printf(dev, "using port I/O\n");
 1162         }
 1163 
 1164         sc->xl_btag = rman_get_bustag(sc->xl_res);
 1165         sc->xl_bhandle = rman_get_bushandle(sc->xl_res);
 1166 
 1167         if (sc->xl_flags & XL_FLAG_FUNCREG) {
 1168                 rid = XL_PCI_FUNCMEM;
 1169                 sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 1170                     RF_ACTIVE);
 1171 
 1172                 if (sc->xl_fres == NULL) {
 1173                         device_printf(dev, "couldn't map funcreg memory\n");
 1174                         error = ENXIO;
 1175                         goto fail;
 1176                 }
 1177 
 1178                 sc->xl_ftag = rman_get_bustag(sc->xl_fres);
 1179                 sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
 1180         }
 1181 
 1182         /* Allocate interrupt */
 1183         rid = 0;
 1184         sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 1185             RF_SHAREABLE | RF_ACTIVE);
 1186         if (sc->xl_irq == NULL) {
 1187                 device_printf(dev, "couldn't map interrupt\n");
 1188                 error = ENXIO;
 1189                 goto fail;
 1190         }
 1191 
 1192         /* Initialize interface name. */
 1193         ifp = sc->xl_ifp = if_alloc(IFT_ETHER);
 1194         if (ifp == NULL) {
 1195                 device_printf(dev, "can not if_alloc()\n");
 1196                 error = ENOSPC;
 1197                 goto fail;
 1198         }
 1199         ifp->if_softc = sc;
 1200         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1201 
 1202         /* Reset the adapter. */
 1203         XL_LOCK(sc);
 1204         xl_reset(sc);
 1205         XL_UNLOCK(sc);
 1206 
 1207         /*
 1208          * Get station address from the EEPROM.
 1209          */
 1210         if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
 1211                 device_printf(dev, "failed to read station address\n");
 1212                 error = ENXIO;
 1213                 goto fail;
 1214         }
 1215 
 1216         callout_init_mtx(&sc->xl_tick_callout, &sc->xl_mtx, 0);
 1217         TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
 1218 
 1219         /*
 1220          * Now allocate a tag for the DMA descriptor lists and a chunk
 1221          * of DMA-able memory based on the tag.  Also obtain the DMA
 1222          * addresses of the RX and TX ring, which we'll need later.
 1223          * All of our lists are allocated as a contiguous block
 1224          * of memory.
 1225          */
 1226         error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 1227             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1228             XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
 1229             &sc->xl_ldata.xl_rx_tag);
 1230         if (error) {
 1231                 device_printf(dev, "failed to allocate rx dma tag\n");
 1232                 goto fail;
 1233         }
 1234 
 1235         error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
 1236             (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT |
 1237             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_rx_dmamap);
 1238         if (error) {
 1239                 device_printf(dev, "no memory for rx list buffers!\n");
 1240                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1241                 sc->xl_ldata.xl_rx_tag = NULL;
 1242                 goto fail;
 1243         }
 1244 
 1245         error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
 1246             sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
 1247             XL_RX_LIST_SZ, xl_dma_map_addr,
 1248             &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
 1249         if (error) {
 1250                 device_printf(dev, "cannot get dma address of the rx ring!\n");
 1251                 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
 1252                     sc->xl_ldata.xl_rx_dmamap);
 1253                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1254                 sc->xl_ldata.xl_rx_tag = NULL;
 1255                 goto fail;
 1256         }
 1257 
 1258         error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 1259             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1260             XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
 1261             &sc->xl_ldata.xl_tx_tag);
 1262         if (error) {
 1263                 device_printf(dev, "failed to allocate tx dma tag\n");
 1264                 goto fail;
 1265         }
 1266 
 1267         error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
 1268             (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT |
 1269             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_tx_dmamap);
 1270         if (error) {
 1271                 device_printf(dev, "no memory for list buffers!\n");
 1272                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1273                 sc->xl_ldata.xl_tx_tag = NULL;
 1274                 goto fail;
 1275         }
 1276 
 1277         error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
 1278             sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
 1279             XL_TX_LIST_SZ, xl_dma_map_addr,
 1280             &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
 1281         if (error) {
 1282                 device_printf(dev, "cannot get dma address of the tx ring!\n");
 1283                 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
 1284                     sc->xl_ldata.xl_tx_dmamap);
 1285                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1286                 sc->xl_ldata.xl_tx_tag = NULL;
 1287                 goto fail;
 1288         }
 1289 
 1290         /*
 1291          * Allocate a DMA tag for the mapping of mbufs.
 1292          */
 1293         error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 1294             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1295             MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
 1296             NULL, &sc->xl_mtag);
 1297         if (error) {
 1298                 device_printf(dev, "failed to allocate mbuf dma tag\n");
 1299                 goto fail;
 1300         }
 1301 
 1302         /* We need a spare DMA map for the RX ring. */
 1303         error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
 1304         if (error)
 1305                 goto fail;
 1306 
 1307         /*
 1308          * Figure out the card type. 3c905B adapters have the
 1309          * 'supportsNoTxLength' bit set in the capabilities
 1310          * word in the EEPROM.
 1311          * Note: my 3c575C cardbus card lies. It returns a value
 1312          * of 0x1578 for its capabilities word, which is somewhat
 1313          * nonsensical. Another way to distinguish a 3c90x chip
 1314          * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
 1315          * bit. This will only be set for 3c90x boomerang chips.
 1316          */
 1317         xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
 1318         if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
 1319             !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
 1320                 sc->xl_type = XL_TYPE_905B;
 1321         else
 1322                 sc->xl_type = XL_TYPE_90X;
 1323 
 1324         /* Check availability of WOL. */
 1325         if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0 &&
 1326             pci_find_extcap(dev, PCIY_PMG, &pmcap) == 0) {
 1327                 sc->xl_pmcap = pmcap;
 1328                 sc->xl_flags |= XL_FLAG_WOL;
 1329                 sinfo2 = 0;
 1330                 xl_read_eeprom(sc, (caddr_t)&sinfo2, XL_EE_SOFTINFO2, 1, 0);
 1331                 if ((sinfo2 & XL_SINFO2_AUX_WOL_CON) == 0 && bootverbose)
 1332                         device_printf(dev,
 1333                             "No auxiliary remote wakeup connector!\n");
 1334         }
 1335 
 1336         /* Set the TX start threshold for best performance. */
 1337         sc->xl_tx_thresh = XL_MIN_FRAMELEN;
 1338 
 1339         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1340         ifp->if_ioctl = xl_ioctl;
 1341         ifp->if_capabilities = IFCAP_VLAN_MTU;
 1342         if (sc->xl_type == XL_TYPE_905B) {
 1343                 ifp->if_hwassist = XL905B_CSUM_FEATURES;
 1344 #ifdef XL905B_TXCSUM_BROKEN
 1345                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1346 #else
 1347                 ifp->if_capabilities |= IFCAP_HWCSUM;
 1348 #endif
 1349         }
 1350         if ((sc->xl_flags & XL_FLAG_WOL) != 0)
 1351                 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
 1352         ifp->if_capenable = ifp->if_capabilities;
 1353 #ifdef DEVICE_POLLING
 1354         ifp->if_capabilities |= IFCAP_POLLING;
 1355 #endif
 1356         ifp->if_start = xl_start;
 1357         ifp->if_init = xl_init;
 1358         IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
 1359         ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1;
 1360         IFQ_SET_READY(&ifp->if_snd);
 1361 
 1362         /*
 1363          * Now we have to see what sort of media we have.
 1364          * This includes probing for an MII interface and a
 1365          * possible PHY.
 1366          */
 1367         XL_SEL_WIN(3);
 1368         sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
 1369         if (bootverbose)
 1370                 device_printf(dev, "media options word: %x\n", sc->xl_media);
 1371 
 1372         xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
 1373         sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
 1374         sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
 1375         sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
 1376 
 1377         xl_mediacheck(sc);
 1378 
 1379         if (sc->xl_media & XL_MEDIAOPT_MII ||
 1380             sc->xl_media & XL_MEDIAOPT_BTX ||
 1381             sc->xl_media & XL_MEDIAOPT_BT4) {
 1382                 if (bootverbose)
 1383                         device_printf(dev, "found MII/AUTO\n");
 1384                 xl_setcfg(sc);
 1385                 /*
 1386                  * Attach PHYs only at MII address 24 if !XL_FLAG_PHYOK.
 1387                  * This is to guard against problems with certain 3Com ASIC
 1388                  * revisions that incorrectly map the internal transceiver
 1389                  * control registers at all MII addresses.
 1390                  */
 1391                 phy = MII_PHY_ANY;
 1392                 if ((sc->xl_flags & XL_FLAG_PHYOK) == 0)
 1393                         phy = 24;
 1394                 error = mii_attach(dev, &sc->xl_miibus, ifp, xl_ifmedia_upd,
 1395                     xl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
 1396                     sc->xl_type == XL_TYPE_905B ? MIIF_DOPAUSE : 0);
 1397                 if (error != 0) {
 1398                         device_printf(dev, "attaching PHYs failed\n");
 1399                         goto fail;
 1400                 }
 1401                 goto done;
 1402         }
 1403 
 1404         /*
 1405          * Sanity check. If the user has selected "auto" and this isn't
 1406          * a 10/100 card of some kind, we need to force the transceiver
 1407          * type to something sane.
 1408          */
 1409         if (sc->xl_xcvr == XL_XCVR_AUTO)
 1410                 xl_choose_xcvr(sc, bootverbose);
 1411 
 1412         /*
 1413          * Do ifmedia setup.
 1414          */
 1415         if (sc->xl_media & XL_MEDIAOPT_BT) {
 1416                 if (bootverbose)
 1417                         device_printf(dev, "found 10baseT\n");
 1418                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
 1419                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
 1420                 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
 1421                         ifmedia_add(&sc->ifmedia,
 1422                             IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
 1423         }
 1424 
 1425         if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
 1426                 /*
 1427                  * Check for a 10baseFL board in disguise.
 1428                  */
 1429                 if (sc->xl_type == XL_TYPE_905B &&
 1430                     sc->xl_media == XL_MEDIAOPT_10FL) {
 1431                         if (bootverbose)
 1432                                 device_printf(dev, "found 10baseFL\n");
 1433                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
 1434                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
 1435                             0, NULL);
 1436                         if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
 1437                                 ifmedia_add(&sc->ifmedia,
 1438                                     IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
 1439                 } else {
 1440                         if (bootverbose)
 1441                                 device_printf(dev, "found AUI\n");
 1442                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
 1443                 }
 1444         }
 1445 
 1446         if (sc->xl_media & XL_MEDIAOPT_BNC) {
 1447                 if (bootverbose)
 1448                         device_printf(dev, "found BNC\n");
 1449                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
 1450         }
 1451 
 1452         if (sc->xl_media & XL_MEDIAOPT_BFX) {
 1453                 if (bootverbose)
 1454                         device_printf(dev, "found 100baseFX\n");
 1455                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
 1456         }
 1457 
 1458         media = IFM_ETHER|IFM_100_TX|IFM_FDX;
 1459         xl_choose_media(sc, &media);
 1460 
 1461         if (sc->xl_miibus == NULL)
 1462                 ifmedia_set(&sc->ifmedia, media);
 1463 
 1464 done:
 1465         if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
 1466                 XL_SEL_WIN(0);
 1467                 CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
 1468         }
 1469 
 1470         /*
 1471          * Call MI attach routine.
 1472          */
 1473         ether_ifattach(ifp, eaddr);
 1474 
 1475         error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE,
 1476             NULL, xl_intr, sc, &sc->xl_intrhand);
 1477         if (error) {
 1478                 device_printf(dev, "couldn't set up irq\n");
 1479                 ether_ifdetach(ifp);
 1480                 goto fail;
 1481         }
 1482 
 1483 fail:
 1484         if (error)
 1485                 xl_detach(dev);
 1486 
 1487         return (error);
 1488 }
 1489 
 1490 /*
 1491  * Choose a default media.
 1492  * XXX This is a leaf function only called by xl_attach() and
 1493  *     acquires/releases the non-recursible driver mutex to
 1494  *     satisfy lock assertions.
 1495  */
 1496 static void
 1497 xl_choose_media(struct xl_softc *sc, int *media)
 1498 {
 1499 
 1500         XL_LOCK(sc);
 1501 
 1502         switch (sc->xl_xcvr) {
 1503         case XL_XCVR_10BT:
 1504                 *media = IFM_ETHER|IFM_10_T;
 1505                 xl_setmode(sc, *media);
 1506                 break;
 1507         case XL_XCVR_AUI:
 1508                 if (sc->xl_type == XL_TYPE_905B &&
 1509                     sc->xl_media == XL_MEDIAOPT_10FL) {
 1510                         *media = IFM_ETHER|IFM_10_FL;
 1511                         xl_setmode(sc, *media);
 1512                 } else {
 1513                         *media = IFM_ETHER|IFM_10_5;
 1514                         xl_setmode(sc, *media);
 1515                 }
 1516                 break;
 1517         case XL_XCVR_COAX:
 1518                 *media = IFM_ETHER|IFM_10_2;
 1519                 xl_setmode(sc, *media);
 1520                 break;
 1521         case XL_XCVR_AUTO:
 1522         case XL_XCVR_100BTX:
 1523         case XL_XCVR_MII:
 1524                 /* Chosen by miibus */
 1525                 break;
 1526         case XL_XCVR_100BFX:
 1527                 *media = IFM_ETHER|IFM_100_FX;
 1528                 break;
 1529         default:
 1530                 device_printf(sc->xl_dev, "unknown XCVR type: %d\n",
 1531                     sc->xl_xcvr);
 1532                 /*
 1533                  * This will probably be wrong, but it prevents
 1534                  * the ifmedia code from panicking.
 1535                  */
 1536                 *media = IFM_ETHER|IFM_10_T;
 1537                 break;
 1538         }
 1539 
 1540         XL_UNLOCK(sc);
 1541 }
 1542 
 1543 /*
 1544  * Shutdown hardware and free up resources. This can be called any
 1545  * time after the mutex has been initialized. It is called in both
 1546  * the error case in attach and the normal detach case so it needs
 1547  * to be careful about only freeing resources that have actually been
 1548  * allocated.
 1549  */
 1550 static int
 1551 xl_detach(device_t dev)
 1552 {
 1553         struct xl_softc         *sc;
 1554         struct ifnet            *ifp;
 1555         int                     rid, res;
 1556 
 1557         sc = device_get_softc(dev);
 1558         ifp = sc->xl_ifp;
 1559 
 1560         KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
 1561 
 1562 #ifdef DEVICE_POLLING
 1563         if (ifp && ifp->if_capenable & IFCAP_POLLING)
 1564                 ether_poll_deregister(ifp);
 1565 #endif
 1566 
 1567         if (sc->xl_flags & XL_FLAG_USE_MMIO) {
 1568                 rid = XL_PCI_LOMEM;
 1569                 res = SYS_RES_MEMORY;
 1570         } else {
 1571                 rid = XL_PCI_LOIO;
 1572                 res = SYS_RES_IOPORT;
 1573         }
 1574 
 1575         /* These should only be active if attach succeeded */
 1576         if (device_is_attached(dev)) {
 1577                 XL_LOCK(sc);
 1578                 xl_stop(sc);
 1579                 XL_UNLOCK(sc);
 1580                 taskqueue_drain(taskqueue_swi, &sc->xl_task);
 1581                 callout_drain(&sc->xl_tick_callout);
 1582                 ether_ifdetach(ifp);
 1583         }
 1584         if (sc->xl_miibus)
 1585                 device_delete_child(dev, sc->xl_miibus);
 1586         bus_generic_detach(dev);
 1587         ifmedia_removeall(&sc->ifmedia);
 1588 
 1589         if (sc->xl_intrhand)
 1590                 bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
 1591         if (sc->xl_irq)
 1592                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
 1593         if (sc->xl_fres != NULL)
 1594                 bus_release_resource(dev, SYS_RES_MEMORY,
 1595                     XL_PCI_FUNCMEM, sc->xl_fres);
 1596         if (sc->xl_res)
 1597                 bus_release_resource(dev, res, rid, sc->xl_res);
 1598 
 1599         if (ifp)
 1600                 if_free(ifp);
 1601 
 1602         if (sc->xl_mtag) {
 1603                 bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
 1604                 bus_dma_tag_destroy(sc->xl_mtag);
 1605         }
 1606         if (sc->xl_ldata.xl_rx_tag) {
 1607                 bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
 1608                     sc->xl_ldata.xl_rx_dmamap);
 1609                 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
 1610                     sc->xl_ldata.xl_rx_dmamap);
 1611                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1612         }
 1613         if (sc->xl_ldata.xl_tx_tag) {
 1614                 bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
 1615                     sc->xl_ldata.xl_tx_dmamap);
 1616                 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
 1617                     sc->xl_ldata.xl_tx_dmamap);
 1618                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1619         }
 1620 
 1621         mtx_destroy(&sc->xl_mtx);
 1622 
 1623         return (0);
 1624 }
 1625 
 1626 /*
 1627  * Initialize the transmit descriptors.
 1628  */
 1629 static int
 1630 xl_list_tx_init(struct xl_softc *sc)
 1631 {
 1632         struct xl_chain_data    *cd;
 1633         struct xl_list_data     *ld;
 1634         int                     error, i;
 1635 
 1636         XL_LOCK_ASSERT(sc);
 1637 
 1638         cd = &sc->xl_cdata;
 1639         ld = &sc->xl_ldata;
 1640         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 1641                 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
 1642                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1643                     &cd->xl_tx_chain[i].xl_map);
 1644                 if (error)
 1645                         return (error);
 1646                 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
 1647                     i * sizeof(struct xl_list);
 1648                 if (i == (XL_TX_LIST_CNT - 1))
 1649                         cd->xl_tx_chain[i].xl_next = NULL;
 1650                 else
 1651                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
 1652         }
 1653 
 1654         cd->xl_tx_free = &cd->xl_tx_chain[0];
 1655         cd->xl_tx_tail = cd->xl_tx_head = NULL;
 1656 
 1657         bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
 1658         return (0);
 1659 }
 1660 
 1661 /*
 1662  * Initialize the transmit descriptors for the 3c90xB (circular ring).
 1663  */
 1664 static int
 1665 xl_list_tx_init_90xB(struct xl_softc *sc)
 1666 {
 1667         struct xl_chain_data    *cd;
 1668         struct xl_list_data     *ld;
 1669         int                     error, i;
 1670 
 1671         XL_LOCK_ASSERT(sc);
 1672 
 1673         cd = &sc->xl_cdata;
 1674         ld = &sc->xl_ldata;
 1675         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 1676                 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
 1677                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1678                     &cd->xl_tx_chain[i].xl_map);
 1679                 if (error)
 1680                         return (error);
 1681                 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
 1682                     i * sizeof(struct xl_list);
 1683                 if (i == (XL_TX_LIST_CNT - 1))
 1684                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
 1685                 else
 1686                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
 1687                 if (i == 0)
 1688                         cd->xl_tx_chain[i].xl_prev =
 1689                             &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
 1690                 else
 1691                         cd->xl_tx_chain[i].xl_prev =
 1692                             &cd->xl_tx_chain[i - 1];
 1693         }
 1694 
 1695         bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
 1696         ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
 1697 
 1698         cd->xl_tx_prod = 1;
 1699         cd->xl_tx_cons = 1;
 1700         cd->xl_tx_cnt = 0;
 1701 
 1702         bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
 1703         return (0);
 1704 }
 1705 
 1706 /*
 1707  * Initialize the RX descriptors and allocate mbufs for them. Note that
 1708  * we arrange the descriptors in a closed ring, so that the last descriptor
 1709  * points back to the first.
 1710  */
 1711 static int
 1712 xl_list_rx_init(struct xl_softc *sc)
 1713 {
 1714         struct xl_chain_data    *cd;
 1715         struct xl_list_data     *ld;
 1716         int                     error, i, next;
 1717         u_int32_t               nextptr;
 1718 
 1719         XL_LOCK_ASSERT(sc);
 1720 
 1721         cd = &sc->xl_cdata;
 1722         ld = &sc->xl_ldata;
 1723 
 1724         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 1725                 cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
 1726                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1727                     &cd->xl_rx_chain[i].xl_map);
 1728                 if (error)
 1729                         return (error);
 1730                 error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
 1731                 if (error)
 1732                         return (error);
 1733                 if (i == (XL_RX_LIST_CNT - 1))
 1734                         next = 0;
 1735                 else
 1736                         next = i + 1;
 1737                 nextptr = ld->xl_rx_dmaaddr +
 1738                     next * sizeof(struct xl_list_onefrag);
 1739                 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
 1740                 ld->xl_rx_list[i].xl_next = htole32(nextptr);
 1741         }
 1742 
 1743         bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1744         cd->xl_rx_head = &cd->xl_rx_chain[0];
 1745 
 1746         return (0);
 1747 }
 1748 
 1749 /*
 1750  * Initialize an RX descriptor and attach an MBUF cluster.
 1751  * If we fail to do so, we need to leave the old mbuf and
 1752  * the old DMA map untouched so that it can be reused.
 1753  */
 1754 static int
 1755 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
 1756 {
 1757         struct mbuf             *m_new = NULL;
 1758         bus_dmamap_t            map;
 1759         bus_dma_segment_t       segs[1];
 1760         int                     error, nseg;
 1761 
 1762         XL_LOCK_ASSERT(sc);
 1763 
 1764         m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 1765         if (m_new == NULL)
 1766                 return (ENOBUFS);
 1767 
 1768         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
 1769 
 1770         /* Force longword alignment for packet payload. */
 1771         m_adj(m_new, ETHER_ALIGN);
 1772 
 1773         error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, sc->xl_tmpmap, m_new,
 1774             segs, &nseg, BUS_DMA_NOWAIT);
 1775         if (error) {
 1776                 m_freem(m_new);
 1777                 device_printf(sc->xl_dev, "can't map mbuf (error %d)\n",
 1778                     error);
 1779                 return (error);
 1780         }
 1781         KASSERT(nseg == 1,
 1782             ("%s: too many DMA segments (%d)", __func__, nseg));
 1783 
 1784         bus_dmamap_unload(sc->xl_mtag, c->xl_map);
 1785         map = c->xl_map;
 1786         c->xl_map = sc->xl_tmpmap;
 1787         sc->xl_tmpmap = map;
 1788         c->xl_mbuf = m_new;
 1789         c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
 1790         c->xl_ptr->xl_frag.xl_addr = htole32(segs->ds_addr);
 1791         c->xl_ptr->xl_status = 0;
 1792         bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
 1793         return (0);
 1794 }
 1795 
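      /*
       * Resync the RX ring: walk the descriptors starting at the current
       * head looking for one whose status word is non-zero (a completed
       * upload that we have not processed yet).  If one is found, move the
       * head there and return EAGAIN so the caller reruns xl_rxeof();
       * return 0 if the whole ring is clean.
       */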
 1796 static int
 1797 xl_rx_resync(struct xl_softc *sc)
 1798 {
 1799         struct xl_chain_onefrag *pos;
 1800         int                     i;
 1801 
 1802         XL_LOCK_ASSERT(sc);
 1803 
 1804         pos = sc->xl_cdata.xl_rx_head;
 1805 
 1806         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 1807                 if (pos->xl_ptr->xl_status)
 1808                         break;
 1809                 pos = pos->xl_next;
 1810         }
 1811 
 1812         if (i == XL_RX_LIST_CNT)
 1813                 return (0);
 1814 
 1815         sc->xl_cdata.xl_rx_head = pos;
 1816 
 1817         return (EAGAIN);
 1818 }
 1819 
 1820 /*
 1821  * A frame has been uploaded: pass the resulting mbuf chain up to
 1822  * the higher level protocols.
 1823  */
 1824 static int
 1825 xl_rxeof(struct xl_softc *sc)
 1826 {
 1827         struct mbuf             *m;
 1828         struct ifnet            *ifp = sc->xl_ifp;
 1829         struct xl_chain_onefrag *cur_rx;
 1830         int                     total_len;
 1831         int                     rx_npkts = 0;
 1832         u_int32_t               rxstat;
 1833 
 1834         XL_LOCK_ASSERT(sc);
 1835 again:
 1836         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
 1837             BUS_DMASYNC_POSTREAD);
 1838         while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
 1839 #ifdef DEVICE_POLLING
 1840                 if (ifp->if_capenable & IFCAP_POLLING) {
 1841                         if (sc->rxcycles <= 0)
 1842                                 break;
 1843                         sc->rxcycles--;
 1844                 }
 1845 #endif
 1846                 cur_rx = sc->xl_cdata.xl_rx_head;
 1847                 sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
 1848                 total_len = rxstat & XL_RXSTAT_LENMASK;
 1849                 rx_npkts++;
 1850 
 1851                 /*
 1852                  * Since we have told the chip to allow large frames,
 1853                  * we need to trap giant frame errors in software. We allow
 1854                  * a little more than the normal frame size to account for
 1855                  * frames with VLAN tags.
 1856                  */
 1857                 if (total_len > XL_MAX_FRAMELEN)
 1858                         rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
 1859 
 1860                 /*
 1861                  * If an error occurs, update stats, clear the
 1862                  * status word and leave the mbuf cluster in place:
 1863                  * it should simply get re-used next time this descriptor
 1864                  * comes up in the ring.
 1865                  */
 1866                 if (rxstat & XL_RXSTAT_UP_ERROR) {
 1867                         ifp->if_ierrors++;
 1868                         cur_rx->xl_ptr->xl_status = 0;
 1869                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1870                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1871                         continue;
 1872                 }
 1873 
 1874                 /*
 1875                  * If the error bit was not set, the upload complete
 1876                  * bit should be set which means we have a valid packet.
 1877                  * If not, something truly strange has happened.
 1878                  */
 1879                 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
 1880                         device_printf(sc->xl_dev,
 1881                             "bad receive status -- packet dropped\n");
 1882                         ifp->if_ierrors++;
 1883                         cur_rx->xl_ptr->xl_status = 0;
 1884                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1885                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1886                         continue;
 1887                 }
 1888 
 1889                 /* No errors; receive the packet. */
 1890                 bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
 1891                     BUS_DMASYNC_POSTREAD);
 1892                 m = cur_rx->xl_mbuf;
 1893 
 1894                 /*
 1895                  * Try to conjure up a new mbuf cluster. If that
 1896                  * fails, it means we have an out of memory condition and
 1897                  * should leave the buffer in place and continue. This will
 1898                  * result in a lost packet, but there's little else we
 1899                  * can do in this situation.
 1900                  */
 1901                 if (xl_newbuf(sc, cur_rx)) {
 1902                         ifp->if_ierrors++;
 1903                         cur_rx->xl_ptr->xl_status = 0;
 1904                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1905                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1906                         continue;
 1907                 }
 1908                 bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1909                     sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1910 
 1911                 ifp->if_ipackets++;
 1912                 m->m_pkthdr.rcvif = ifp;
 1913                 m->m_pkthdr.len = m->m_len = total_len;
 1914 
 1915                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 1916                         /* Do IP checksum checking. */
 1917                         if (rxstat & XL_RXSTAT_IPCKOK)
 1918                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1919                         if (!(rxstat & XL_RXSTAT_IPCKERR))
 1920                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1921                         if ((rxstat & XL_RXSTAT_TCPCOK &&
 1922                              !(rxstat & XL_RXSTAT_TCPCKERR)) ||
 1923                             (rxstat & XL_RXSTAT_UDPCKOK &&
 1924                              !(rxstat & XL_RXSTAT_UDPCKERR))) {
 1925                                 m->m_pkthdr.csum_flags |=
 1926                                         CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
 1927                                 m->m_pkthdr.csum_data = 0xffff;
 1928                         }
 1929                 }
 1930 
 1931                 XL_UNLOCK(sc);
 1932                 (*ifp->if_input)(ifp, m);
 1933                 XL_LOCK(sc);
 1934 
 1935                 /*
 1936                  * If we are running from the taskqueue, the interface
 1937                  * might have been stopped while we were passing the last
 1938                  * packet up the network stack.
 1939                  */
 1940                 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1941                         return (rx_npkts);
 1942         }
 1943 
 1944         /*
 1945          * Handle the 'end of channel' condition. When the upload
 1946          * engine hits the end of the RX ring, it will stall. This
 1947          * is our cue to flush the RX ring, reload the uplist pointer
 1948          * register and unstall the engine.
 1949          * XXX This is actually a little goofy. With the ThunderLAN
 1950          * chip, you get an interrupt when the receiver hits the end
 1951          * of the receive ring, which tells you exactly when
 1952          * you need to reload the ring pointer. Here we have to
 1953          * fake it. I'm mad at myself for not being clever enough
 1954          * to avoid the use of a goto here.
 1955          */
 1956         if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
 1957                 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
 1958                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 1959                 xl_wait(sc);
 1960                 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
 1961                 sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
 1962                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 1963                 goto again;
 1964         }
 1965         return (rx_npkts);
 1966 }
 1967 
 1968 /*
 1969  * Taskqueue wrapper for xl_rxeof().
 1970  */
 1971 static void
 1972 xl_rxeof_task(void *arg, int pending)
 1973 {
 1974         struct xl_softc *sc = (struct xl_softc *)arg;
 1975 
 1976         XL_LOCK(sc);
 1977         if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING)
 1978                 xl_rxeof(sc);
 1979         XL_UNLOCK(sc);
 1980 }
 1981 
 1982 /*
 1983  * A frame was downloaded to the chip. It's safe for us to clean up
 1984  * the list buffers.
 1985  */
 1986 static void
 1987 xl_txeof(struct xl_softc *sc)
 1988 {
 1989         struct xl_chain         *cur_tx;
 1990         struct ifnet            *ifp = sc->xl_ifp;
 1991 
 1992         XL_LOCK_ASSERT(sc);
 1993 
 1994         /*
 1995          * Go through our tx list and free mbufs for those
 1996          * frames that have been downloaded (transmitted). Note: the 3c905B
 1997          * sets a special bit in the status word to let us
 1998          * know that a frame has been downloaded, but the
 1999          * original 3c900/3c905 adapters don't do that.
 2000          * Consequently, we have to use a different test if
 2001          * xl_type != XL_TYPE_905B.
 2002          */
 2003         while (sc->xl_cdata.xl_tx_head != NULL) {
 2004                 cur_tx = sc->xl_cdata.xl_tx_head;
 2005 
 2006                 if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
 2007                         break;
 2008 
 2009                 sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
 2010                 bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
 2011                     BUS_DMASYNC_POSTWRITE);
 2012                 bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
 2013                 m_freem(cur_tx->xl_mbuf);
 2014                 cur_tx->xl_mbuf = NULL;
 2015                 ifp->if_opackets++;
 2016                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2017 
 2018                 cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
 2019                 sc->xl_cdata.xl_tx_free = cur_tx;
 2020         }
 2021 
 2022         if (sc->xl_cdata.xl_tx_head == NULL) {
 2023                 sc->xl_wdog_timer = 0;
 2024                 sc->xl_cdata.xl_tx_tail = NULL;
 2025         } else {
 2026                 if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
 2027                         !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
 2028                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2029                                 sc->xl_cdata.xl_tx_head->xl_phys);
 2030                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2031                 }
 2032         }
 2033 }
 2034 
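      /*
       * TX completion handler for the 3c90xB.  These chips set the
       * DL_COMPLETE bit in the descriptor status word, so we walk the ring
       * from the consumer index to the producer index and reclaim the mbuf
       * for every descriptor the chip has finished with.
       */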
 2035 static void
 2036 xl_txeof_90xB(struct xl_softc *sc)
 2037 {
 2038         struct xl_chain         *cur_tx = NULL;
 2039         struct ifnet            *ifp = sc->xl_ifp;
 2040         int                     idx;
 2041 
 2042         XL_LOCK_ASSERT(sc);
 2043 
 2044         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2045             BUS_DMASYNC_POSTREAD);
 2046         idx = sc->xl_cdata.xl_tx_cons;
 2047         while (idx != sc->xl_cdata.xl_tx_prod) {
 2048                 cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2049 
 2050                 if (!(le32toh(cur_tx->xl_ptr->xl_status) &
 2051                       XL_TXSTAT_DL_COMPLETE))
 2052                         break;
 2053 
 2054                 if (cur_tx->xl_mbuf != NULL) {
 2055                         bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
 2056                             BUS_DMASYNC_POSTWRITE);
 2057                         bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
 2058                         m_freem(cur_tx->xl_mbuf);
 2059                         cur_tx->xl_mbuf = NULL;
 2060                 }
 2061 
 2062                 ifp->if_opackets++;
 2063 
 2064                 sc->xl_cdata.xl_tx_cnt--;
 2065                 XL_INC(idx, XL_TX_LIST_CNT);
 2066         }
 2067 
 2068         if (sc->xl_cdata.xl_tx_cnt == 0)
 2069                 sc->xl_wdog_timer = 0;
 2070         sc->xl_cdata.xl_tx_cons = idx;
 2071 
 2072         if (cur_tx != NULL)
 2073                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2074 }
 2075 
 2076 /*
 2077  * TX 'end of channel' interrupt handler. Actually, we should
 2078  * only get a 'TX complete' interrupt if there's a transmit error,
 2079  * so this is really TX error handler.
 2080  * so this is really a TX error handler.
 2081 static void
 2082 xl_txeoc(struct xl_softc *sc)
 2083 {
 2084         u_int8_t                txstat;
 2085 
 2086         XL_LOCK_ASSERT(sc);
 2087 
 2088         while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
 2089                 if (txstat & XL_TXSTATUS_UNDERRUN ||
 2090                         txstat & XL_TXSTATUS_JABBER ||
 2091                         txstat & XL_TXSTATUS_RECLAIM) {
 2092                         device_printf(sc->xl_dev,
 2093                             "transmission error: 0x%02x\n", txstat);
 2094                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2095                         xl_wait(sc);
 2096                         if (sc->xl_type == XL_TYPE_905B) {
 2097                                 if (sc->xl_cdata.xl_tx_cnt) {
 2098                                         int                     i;
 2099                                         struct xl_chain         *c;
 2100 
 2101                                         i = sc->xl_cdata.xl_tx_cons;
 2102                                         c = &sc->xl_cdata.xl_tx_chain[i];
 2103                                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2104                                             c->xl_phys);
 2105                                         CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
 2106                                         sc->xl_wdog_timer = 5;
 2107                                 }
 2108                         } else {
 2109                                 if (sc->xl_cdata.xl_tx_head != NULL) {
 2110                                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2111                                             sc->xl_cdata.xl_tx_head->xl_phys);
 2112                                         sc->xl_wdog_timer = 5;
 2113                                 }
 2114                         }
 2115                         /*
 2116                          * Remember to set this for the
 2117                          * first generation 3c90X chips.
 2118                          */
 2119                         CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
 2120                         if (txstat & XL_TXSTATUS_UNDERRUN &&
 2121                             sc->xl_tx_thresh < XL_PACKET_SIZE) {
 2122                                 sc->xl_tx_thresh += XL_MIN_FRAMELEN;
 2123                                 device_printf(sc->xl_dev,
 2124 "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh);
 2125                         }
 2126                         CSR_WRITE_2(sc, XL_COMMAND,
 2127                             XL_CMD_TX_SET_START|sc->xl_tx_thresh);
 2128                         if (sc->xl_type == XL_TYPE_905B) {
 2129                                 CSR_WRITE_2(sc, XL_COMMAND,
 2130                                 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
 2131                         }
 2132                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2133                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2134                 } else {
 2135                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2136                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2137                 }
 2138                 /*
 2139                  * Write an arbitrary byte to the TX_STATUS register
 2140                  * to clear this interrupt/error and advance to the next.
 2141                  */
 2142                 CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
 2143         }
 2144 }
 2145 
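      /*
       * Interrupt handler.  Read and acknowledge the status register in a
       * loop, dispatching RX and TX completions, TX errors and statistics
       * overflows, and reinitializing the chip on an adapter failure.
       * Finally, kick the transmit routine if packets are waiting in the
       * send queue.
       */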
 2146 static void
 2147 xl_intr(void *arg)
 2148 {
 2149         struct xl_softc         *sc = arg;
 2150         struct ifnet            *ifp = sc->xl_ifp;
 2151         u_int16_t               status;
 2152 
 2153         XL_LOCK(sc);
 2154 
 2155 #ifdef DEVICE_POLLING
 2156         if (ifp->if_capenable & IFCAP_POLLING) {
 2157                 XL_UNLOCK(sc);
 2158                 return;
 2159         }
 2160 #endif
 2161 
 2162         for (;;) {
 2163                 status = CSR_READ_2(sc, XL_STATUS);
 2164                 if ((status & XL_INTRS) == 0 || status == 0xFFFF)
 2165                         break;
 2166                 CSR_WRITE_2(sc, XL_COMMAND,
 2167                     XL_CMD_INTR_ACK|(status & XL_INTRS));
 2168                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 2169                         break;
 2170 
 2171                 if (status & XL_STAT_UP_COMPLETE) {
 2172                         if (xl_rxeof(sc) == 0) {
 2173                                 while (xl_rx_resync(sc))
 2174                                         xl_rxeof(sc);
 2175                         }
 2176                 }
 2177 
 2178                 if (status & XL_STAT_DOWN_COMPLETE) {
 2179                         if (sc->xl_type == XL_TYPE_905B)
 2180                                 xl_txeof_90xB(sc);
 2181                         else
 2182                                 xl_txeof(sc);
 2183                 }
 2184 
 2185                 if (status & XL_STAT_TX_COMPLETE) {
 2186                         ifp->if_oerrors++;
 2187                         xl_txeoc(sc);
 2188                 }
 2189 
 2190                 if (status & XL_STAT_ADFAIL) {
 2191                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2192                         xl_init_locked(sc);
 2193                         break;
 2194                 }
 2195 
 2196                 if (status & XL_STAT_STATSOFLOW)
 2197                         xl_stats_update(sc);
 2198         }
 2199 
 2200         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2201             ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2202                 if (sc->xl_type == XL_TYPE_905B)
 2203                         xl_start_90xB_locked(ifp);
 2204                 else
 2205                         xl_start_locked(ifp);
 2206         }
 2207 
 2208         XL_UNLOCK(sc);
 2209 }
 2210 
 2211 #ifdef DEVICE_POLLING
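      /*
       * Polling entry points (DEVICE_POLLING).  xl_poll() takes the driver
       * lock and calls xl_poll_locked(), which processes up to 'count'
       * received packets, reclaims completed TX descriptors and, for
       * POLL_AND_CHECK_STATUS, also services TX errors, adapter failures
       * and statistics overflows.
       */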
 2212 static int
 2213 xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2214 {
 2215         struct xl_softc *sc = ifp->if_softc;
 2216         int rx_npkts = 0;
 2217 
 2218         XL_LOCK(sc);
 2219         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2220                 rx_npkts = xl_poll_locked(ifp, cmd, count);
 2221         XL_UNLOCK(sc);
 2222         return (rx_npkts);
 2223 }
 2224 
 2225 static int
 2226 xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2227 {
 2228         struct xl_softc *sc = ifp->if_softc;
 2229         int rx_npkts;
 2230 
 2231         XL_LOCK_ASSERT(sc);
 2232 
 2233         sc->rxcycles = count;
 2234         rx_npkts = xl_rxeof(sc);
 2235         if (sc->xl_type == XL_TYPE_905B)
 2236                 xl_txeof_90xB(sc);
 2237         else
 2238                 xl_txeof(sc);
 2239 
 2240         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 2241                 if (sc->xl_type == XL_TYPE_905B)
 2242                         xl_start_90xB_locked(ifp);
 2243                 else
 2244                         xl_start_locked(ifp);
 2245         }
 2246 
 2247         if (cmd == POLL_AND_CHECK_STATUS) {
 2248                 u_int16_t status;
 2249 
 2250                 status = CSR_READ_2(sc, XL_STATUS);
 2251                 if (status & XL_INTRS && status != 0xFFFF) {
 2252                         CSR_WRITE_2(sc, XL_COMMAND,
 2253                             XL_CMD_INTR_ACK|(status & XL_INTRS));
 2254 
 2255                         if (status & XL_STAT_TX_COMPLETE) {
 2256                                 ifp->if_oerrors++;
 2257                                 xl_txeoc(sc);
 2258                         }
 2259 
 2260                         if (status & XL_STAT_ADFAIL) {
 2261                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2262                                 xl_init_locked(sc);
 2263                         }
 2264 
 2265                         if (status & XL_STAT_STATSOFLOW)
 2266                                 xl_stats_update(sc);
 2267                 }
 2268         }
 2269         return (rx_npkts);
 2270 }
 2271 #endif /* DEVICE_POLLING */
 2272 
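      /*
       * Periodic callout: tick the MII bus (if any), refresh the statistics
       * counters and run the watchdog, then reschedule ourselves one second
       * later unless xl_watchdog() returns EJUSTRETURN.
       */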
 2273 static void
 2274 xl_tick(void *xsc)
 2275 {
 2276         struct xl_softc *sc = xsc;
 2277         struct mii_data *mii;
 2278 
 2279         XL_LOCK_ASSERT(sc);
 2280 
 2281         if (sc->xl_miibus != NULL) {
 2282                 mii = device_get_softc(sc->xl_miibus);
 2283                 mii_tick(mii);
 2284         }
 2285 
 2286         xl_stats_update(sc);
 2287         if (xl_watchdog(sc) == EJUSTRETURN)
 2288                 return;
 2289 
 2290         callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc);
 2291 }
 2292 
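      /*
       * Harvest the hardware statistics counters: read the 16 byte-wide
       * registers in window 6, fold the overrun and collision counts into
       * the interface statistics, and read the BadSSD counter in window 4
       * so that all counters are cleared and no statsoflow interrupt is
       * left pending.
       */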
 2293 static void
 2294 xl_stats_update(struct xl_softc *sc)
 2295 {
 2296         struct ifnet            *ifp = sc->xl_ifp;
 2297         struct xl_stats         xl_stats;
 2298         u_int8_t                *p;
 2299         int                     i;
 2300 
 2301         XL_LOCK_ASSERT(sc);
 2302 
 2303         bzero((char *)&xl_stats, sizeof(struct xl_stats));
 2304 
 2305         p = (u_int8_t *)&xl_stats;
 2306 
 2307         /* Read all the stats registers. */
 2308         XL_SEL_WIN(6);
 2309 
 2310         for (i = 0; i < 16; i++)
 2311                 *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
 2312 
 2313         ifp->if_ierrors += xl_stats.xl_rx_overrun;
 2314 
 2315         ifp->if_collisions += xl_stats.xl_tx_multi_collision +
 2316             xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision;
 2317 
 2318         /*
 2319          * Boomerang and cyclone chips have an extra stats counter
 2320          * in window 4 (BadSSD). We have to read this too in order
 2321          * to clear out all the stats registers and avoid a statsoflow
 2322          * interrupt.
 2323          */
 2324         XL_SEL_WIN(4);
 2325         CSR_READ_1(sc, XL_W4_BADSSD);
 2326         XL_SEL_WIN(7);
 2327 }
 2328 
 2329 /*
 2330  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 2331  * pointers to the fragment pointers.
 2332  */
 2333 static int
 2334 xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf **m_head)
 2335 {
 2336         struct mbuf             *m_new;
 2337         struct ifnet            *ifp = sc->xl_ifp;
 2338         int                     error, i, nseg, total_len;
 2339         u_int32_t               status;
 2340 
 2341         XL_LOCK_ASSERT(sc);
 2342 
 2343         error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head,
 2344             sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
 2345 
 2346         if (error && error != EFBIG) {
 2347                 if_printf(ifp, "can't map mbuf (error %d)\n", error);
 2348                 return (error);
 2349         }
 2350 
 2351         /*
 2352          * Handle special case: we used up all 63 fragments,
 2353          * but we have more mbufs left in the chain. Copy the
 2354          * data into an mbuf cluster. Note that we don't
 2355          * bother clearing the values in the other fragment
 2356          * pointers/counters; it wouldn't gain us anything,
 2357          * and would waste cycles.
 2358          */
 2359         if (error) {
 2360                 m_new = m_collapse(*m_head, M_DONTWAIT, XL_MAXFRAGS);
 2361                 if (m_new == NULL) {
 2362                         m_freem(*m_head);
 2363                         *m_head = NULL;
 2364                         return (ENOBUFS);
 2365                 }
 2366                 *m_head = m_new;
 2367 
 2368                 error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map,
 2369                     *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
 2370                 if (error) {
 2371                         m_freem(*m_head);
 2372                         *m_head = NULL;
 2373                         if_printf(ifp, "can't map mbuf (error %d)\n", error);
 2374                         return (error);
 2375                 }
 2376         }
 2377 
 2378         KASSERT(nseg <= XL_MAXFRAGS,
 2379             ("%s: too many DMA segments (%d)", __func__, nseg));
 2380         if (nseg == 0) {
 2381                 m_freem(*m_head);
 2382                 *m_head = NULL;
 2383                 return (EIO);
 2384         }
 2385         bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
 2386 
 2387         total_len = 0;
 2388         for (i = 0; i < nseg; i++) {
 2389                 KASSERT(sc->xl_cdata.xl_tx_segs[i].ds_len <= MCLBYTES,
 2390                     ("segment size too large"));
 2391                 c->xl_ptr->xl_frag[i].xl_addr =
 2392                     htole32(sc->xl_cdata.xl_tx_segs[i].ds_addr);
 2393                 c->xl_ptr->xl_frag[i].xl_len =
 2394                     htole32(sc->xl_cdata.xl_tx_segs[i].ds_len);
 2395                 total_len += sc->xl_cdata.xl_tx_segs[i].ds_len;
 2396         }
 2397         c->xl_ptr->xl_frag[nseg - 1].xl_len |= htole32(XL_LAST_FRAG);
 2398 
 2399         if (sc->xl_type == XL_TYPE_905B) {
 2400                 status = XL_TXSTAT_RND_DEFEAT;
 2401 
 2402 #ifndef XL905B_TXCSUM_BROKEN
 2403                 if ((*m_head)->m_pkthdr.csum_flags) {
 2404                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
 2405                                 status |= XL_TXSTAT_IPCKSUM;
 2406                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
 2407                                 status |= XL_TXSTAT_TCPCKSUM;
 2408                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
 2409                                 status |= XL_TXSTAT_UDPCKSUM;
 2410                 }
 2411 #endif
 2412         } else
 2413                 status = total_len;
 2414         c->xl_ptr->xl_status = htole32(status);
 2415         c->xl_ptr->xl_next = 0;
 2416 
 2417         c->xl_mbuf = *m_head;
 2418         return (0);
 2419 }
 2420 
 2421 /*
 2422  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 2423  * to the mbuf data regions directly in the transmit lists. We also save a
 2424  * copy of the pointers since the transmit list fragment pointers are
 2425  * physical addresses.
 2426  */
 2427 
 2428 static void
 2429 xl_start(struct ifnet *ifp)
 2430 {
 2431         struct xl_softc         *sc = ifp->if_softc;
 2432 
 2433         XL_LOCK(sc);
 2434 
 2435         if (sc->xl_type == XL_TYPE_905B)
 2436                 xl_start_90xB_locked(ifp);
 2437         else
 2438                 xl_start_locked(ifp);
 2439 
 2440         XL_UNLOCK(sc);
 2441 }
 2442 
 2443 static void
 2444 xl_start_locked(struct ifnet *ifp)
 2445 {
 2446         struct xl_softc         *sc = ifp->if_softc;
 2447         struct mbuf             *m_head;
 2448         struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
 2449         struct xl_chain         *prev_tx;
 2450         int                     error;
 2451 
 2452         XL_LOCK_ASSERT(sc);
 2453 
 2454         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2455             IFF_DRV_RUNNING)
 2456                 return;
 2457         /*
 2458          * Check for an available queue slot. If there are none,
 2459          * punt.
 2460          */
 2461         if (sc->xl_cdata.xl_tx_free == NULL) {
 2462                 xl_txeoc(sc);
 2463                 xl_txeof(sc);
 2464                 if (sc->xl_cdata.xl_tx_free == NULL) {
 2465                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2466                         return;
 2467                 }
 2468         }
 2469 
 2470         start_tx = sc->xl_cdata.xl_tx_free;
 2471 
 2472         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2473             sc->xl_cdata.xl_tx_free != NULL;) {
 2474                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2475                 if (m_head == NULL)
 2476                         break;
 2477 
 2478                 /* Pick a descriptor off the free list. */
 2479                 prev_tx = cur_tx;
 2480                 cur_tx = sc->xl_cdata.xl_tx_free;
 2481 
 2482                 /* Pack the data into the descriptor. */
 2483                 error = xl_encap(sc, cur_tx, &m_head);
 2484                 if (error) {
 2485                         cur_tx = prev_tx;
 2486                         if (m_head == NULL)
 2487                                 break;
 2488                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2489                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2490                         break;
 2491                 }
 2492 
 2493                 sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
 2494                 cur_tx->xl_next = NULL;
 2495 
 2496                 /* Chain it together. */
 2497                 if (prev != NULL) {
 2498                         prev->xl_next = cur_tx;
 2499                         prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
 2500                 }
 2501                 prev = cur_tx;
 2502 
 2503                 /*
 2504                  * If there's a BPF listener, bounce a copy of this frame
 2505                  * to him.
 2506                  */
 2507                 BPF_MTAP(ifp, cur_tx->xl_mbuf);
 2508         }
 2509 
 2510         /*
 2511          * If there are no packets queued, bail.
 2512          */
 2513         if (cur_tx == NULL)
 2514                 return;
 2515 
 2516         /*
 2517          * Place the request for the download (TX completion) interrupt
 2518          * in the last descriptor in the chain. This way, if
 2519          * we're chaining several packets at once, we'll only
 2520          * get an interrupt once for the whole chain rather than
 2521          * once for each packet.
 2522          */
 2523         cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
 2524 
 2525         /*
 2526          * Queue the packets. If the TX channel is clear, update
 2527          * the downlist pointer register.
 2528          */
 2529         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
 2530         xl_wait(sc);
 2531 
 2532         if (sc->xl_cdata.xl_tx_head != NULL) {
 2533                 sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
 2534                 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
 2535                     htole32(start_tx->xl_phys);
 2536                 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
 2537                     htole32(~XL_TXSTAT_DL_INTR);
 2538                 sc->xl_cdata.xl_tx_tail = cur_tx;
 2539         } else {
 2540                 sc->xl_cdata.xl_tx_head = start_tx;
 2541                 sc->xl_cdata.xl_tx_tail = cur_tx;
 2542         }
 2543         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2544             BUS_DMASYNC_PREWRITE);
 2545         if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
 2546                 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);
 2547 
 2548         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2549 
 2550         XL_SEL_WIN(7);
 2551 
 2552         /*
 2553          * Set a timeout in case the chip goes out to lunch.
 2554          */
 2555         sc->xl_wdog_timer = 5;
 2556 
 2557         /*
 2558          * XXX Under certain conditions, usually on slower machines
 2559          * where interrupts may be dropped, it's possible for the
 2560          * adapter to chew up all the buffers in the receive ring
 2561          * and stall, without us being able to do anything about it.
 2562          * To guard against this, we need to make a pass over the
 2563          * RX queue to make sure there aren't any packets pending.
 2564          * Doing it here means we can flush the receive ring at the
 2565          * same time the chip is DMAing the transmit descriptors we
 2566          * just gave it.
 2567          *
 2568          * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
 2569          * nature of their chips in all their marketing literature;
 2570          * we may as well take advantage of it. :)
 2571          */
 2572         taskqueue_enqueue(taskqueue_swi, &sc->xl_task);
 2573 }
 2574 
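      /*
       * Transmit routine for the 3c90xB.  The descriptors form a fixed
       * circular ring indexed by producer/consumer counters, so instead of
       * stalling the download engine and splicing in a new chain we fill
       * free slots starting at the producer index and link each new
       * descriptor in behind the previous one.
       */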
 2575 static void
 2576 xl_start_90xB_locked(struct ifnet *ifp)
 2577 {
 2578         struct xl_softc         *sc = ifp->if_softc;
 2579         struct mbuf             *m_head;
 2580         struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
 2581         struct xl_chain         *prev_tx;
 2582         int                     error, idx;
 2583 
 2584         XL_LOCK_ASSERT(sc);
 2585 
 2586         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2587             IFF_DRV_RUNNING)
 2588                 return;
 2589 
 2590         idx = sc->xl_cdata.xl_tx_prod;
 2591         start_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2592 
 2593         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2594             sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL;) {
 2595                 if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
 2596                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2597                         break;
 2598                 }
 2599 
 2600                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2601                 if (m_head == NULL)
 2602                         break;
 2603 
 2604                 prev_tx = cur_tx;
 2605                 cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2606 
 2607                 /* Pack the data into the descriptor. */
 2608                 error = xl_encap(sc, cur_tx, &m_head);
 2609                 if (error) {
 2610                         cur_tx = prev_tx;
 2611                         if (m_head == NULL)
 2612                                 break;
 2613                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2614                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2615                         break;
 2616                 }
 2617 
 2618                 /* Chain it together. */
 2619                 if (prev != NULL)
 2620                         prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
 2621                 prev = cur_tx;
 2622 
 2623                 /*
 2624                  * If there's a BPF listener, bounce a copy of this frame
 2625                  * to him.
 2626                  */
 2627                 BPF_MTAP(ifp, cur_tx->xl_mbuf);
 2628 
 2629                 XL_INC(idx, XL_TX_LIST_CNT);
 2630                 sc->xl_cdata.xl_tx_cnt++;
 2631         }
 2632 
 2633         /*
 2634          * If there are no packets queued, bail.
 2635          */
 2636         if (cur_tx == NULL)
 2637                 return;
 2638 
 2639         /*
 2640          * Place the request for the download (TX completion) interrupt
 2641          * in the last descriptor in the chain. This way, if
 2642          * we're chaining several packets at once, we'll only
 2643          * get an interrupt once for the whole chain rather than
 2644          * once for each packet.
 2645          */
 2646         cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
 2647 
 2648         /* Start transmission */
 2649         sc->xl_cdata.xl_tx_prod = idx;
 2650         start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
 2651         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2652             BUS_DMASYNC_PREWRITE);
 2653 
 2654         /*
 2655          * Set a timeout in case the chip goes out to lunch.
 2656          */
 2657         sc->xl_wdog_timer = 5;
 2658 }
 2659 
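      /*
       * Interface initialization entry point; it simply acquires the
       * driver lock and hands off to xl_init_locked().
       */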
 2660 static void
 2661 xl_init(void *xsc)
 2662 {
 2663         struct xl_softc         *sc = xsc;
 2664 
 2665         XL_LOCK(sc);
 2666         xl_init_locked(sc);
 2667         XL_UNLOCK(sc);
 2668 }
 2669 
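      /*
       * Bring the interface up with the driver lock held: stop any pending
       * I/O, reset the chip, program the station address, (re)build the RX
       * and TX descriptor rings, set the transmit thresholds, load the
       * list pointers, configure the RX filter, enable interrupts and
       * finally enable the receiver and transmitter.
       */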
 2670 static void
 2671 xl_init_locked(struct xl_softc *sc)
 2672 {
 2673         struct ifnet            *ifp = sc->xl_ifp;
 2674         int                     error, i;
 2675         struct mii_data         *mii = NULL;
 2676 
 2677         XL_LOCK_ASSERT(sc);
 2678 
 2679         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 2680                 return;
 2681         /*
 2682          * Cancel pending I/O and free all RX/TX buffers.
 2683          */
 2684         xl_stop(sc);
 2685 
 2686         /* Reset the chip to a known state. */
 2687         xl_reset(sc);
 2688 
 2689         if (sc->xl_miibus == NULL) {
 2690                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 2691                 xl_wait(sc);
 2692         }
 2693         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2694         xl_wait(sc);
 2695         DELAY(10000);
 2696 
 2697         if (sc->xl_miibus != NULL)
 2698                 mii = device_get_softc(sc->xl_miibus);
 2699 
 2700         /*
 2701          * Clear the WOL status and disable all WOL features, as WOL
 2702          * would interfere with Rx operation in normal environments.
 2703          */
 2704         if ((sc->xl_flags & XL_FLAG_WOL) != 0) {
 2705                 XL_SEL_WIN(7);
 2706                 CSR_READ_2(sc, XL_W7_BM_PME);
 2707                 CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
 2708         }
 2709         /* Init our MAC address */
 2710         XL_SEL_WIN(2);
 2711         for (i = 0; i < ETHER_ADDR_LEN; i++) {
 2712                 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
 2713                                 IF_LLADDR(sc->xl_ifp)[i]);
 2714         }
 2715 
 2716         /* Clear the station mask. */
 2717         for (i = 0; i < 3; i++)
 2718                 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
 2719 #ifdef notdef
 2720         /* Reset TX and RX. */
 2721         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 2722         xl_wait(sc);
 2723         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2724         xl_wait(sc);
 2725 #endif
 2726         /* Init circular RX list. */
 2727         error = xl_list_rx_init(sc);
 2728         if (error) {
 2729                 device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n",
 2730                     error);
 2731                 xl_stop(sc);
 2732                 return;
 2733         }
 2734 
 2735         /* Init TX descriptors. */
 2736         if (sc->xl_type == XL_TYPE_905B)
 2737                 error = xl_list_tx_init_90xB(sc);
 2738         else
 2739                 error = xl_list_tx_init(sc);
 2740         if (error) {
 2741                 device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n",
 2742                     error);
 2743                 xl_stop(sc);
 2744                 return;
 2745         }
 2746 
 2747         /*
 2748          * Set the TX freethresh value.
 2749          * Note that this has no effect on 3c905B "cyclone"
 2750          * cards but is required for 3c900/3c905 "boomerang"
 2751          * cards in order to enable the download engine.
 2752          */
 2753         CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
 2754 
 2755         /* Set the TX start threshold for best performance. */
 2756         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
 2757 
 2758         /*
 2759          * If this is a 3c905B, also set the tx reclaim threshold.
 2760          * This helps cut down on the number of tx reclaim errors
 2761          * that could happen on a busy network. The chip multiplies
 2762          * the register value by 16 to obtain the actual threshold
 2763          * in bytes, so we divide by 16 when setting the value here.
 2764          * The existing threshold value can be examined by reading
 2765          * the register at offset 9 in window 5.
 2766          */
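              /*
               * For example, if XL_PACKET_SIZE is 1540, the value written
               * below is 1540 >> 4 == 96, and the chip's reclaim threshold
               * works out to 96 * 16 == 1536 bytes.
               */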
 2767         if (sc->xl_type == XL_TYPE_905B) {
 2768                 CSR_WRITE_2(sc, XL_COMMAND,
 2769                     XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
 2770         }
 2771 
 2772         /* Set RX filter bits. */
 2773         xl_rxfilter(sc);
 2774 
 2775         /*
 2776          * Load the address of the RX list. We have to
 2777          * stall the upload engine before we can manipulate
 2778          * the uplist pointer register, then unstall it when
 2779          * we're finished. We also have to wait for the
 2780          * stall command to complete before proceeding.
 2781          * Note that we have to do this after any RX resets
 2782          * have completed since the uplist register is cleared
 2783          * by a reset.
 2784          */
 2785         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 2786         xl_wait(sc);
 2787         CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
 2788         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 2789         xl_wait(sc);
 2790 
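              /*
               * On 905B-class chips the download engine polls the TX
               * descriptor list at the interval programmed below, so the
               * start routine only has to link new descriptors into the
               * list.  The down list pointer itself is loaded here while
               * the download engine is stalled, mirroring the upload
               * setup above.
               */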
 2791         if (sc->xl_type == XL_TYPE_905B) {
 2792                 /* Set polling interval */
 2793                 CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
 2794                 /* Load the address of the TX list */
 2795                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
 2796                 xl_wait(sc);
 2797                 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2798                     sc->xl_cdata.xl_tx_chain[0].xl_phys);
 2799                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2800                 xl_wait(sc);
 2801         }
 2802 
 2803         /*
 2804          * If the coax transceiver is on, make sure to enable
 2805          * the DC-DC converter.
 2806          */
 2807         XL_SEL_WIN(3);
 2808         if (sc->xl_xcvr == XL_XCVR_COAX)
 2809                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
 2810         else
 2811                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
 2812 
 2813         /*
 2814          * Increase the packet size to allow reception of 802.1Q or ISL packets.
 2815          * For the 3c90x chip, set the 'allow large packets' bit in the MAC
 2816          * control register. For 3c90xB/C chips, use the RX packet size
 2817          * register.
 2818          */
 2819 
 2820         if (sc->xl_type == XL_TYPE_905B)
 2821                 CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
 2822         else {
 2823                 u_int8_t macctl;
 2824                 macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
 2825                 macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
 2826                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
 2827         }
 2828 
 2829         /* Clear out the stats counters. */
 2830         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
 2831         xl_stats_update(sc);
 2832         XL_SEL_WIN(4);
 2833         CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
 2834         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
 2835 
 2836         /*
 2837          * Enable interrupts.
 2838          */
 2839         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
 2840         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
 2841 #ifdef DEVICE_POLLING
 2842         /* Disable interrupts if we are polling. */
 2843         if (ifp->if_capenable & IFCAP_POLLING)
 2844                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 2845         else
 2846 #endif
 2847         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
 2848         if (sc->xl_flags & XL_FLAG_FUNCREG)
 2849             bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
 2850 
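              /*
               * Early receive apparently allows the upload (RX DMA) engine
               * to begin transferring a frame to the host before it has
               * been fully received.  Judging by the shift below, the
               * threshold is presumably specified in units of 4 bytes.
               */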
 2851         /* Set the RX early threshold */
 2852         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
 2853         CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
 2854 
 2855         /* Enable receiver and transmitter. */
 2856         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2857         xl_wait(sc);
 2858         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
 2859         xl_wait(sc);
 2860 
 2861         /* XXX Downcall to miibus. */
 2862         if (mii != NULL)
 2863                 mii_mediachg(mii);
 2864 
 2865         /* Select window 7 for normal operations. */
 2866         XL_SEL_WIN(7);
 2867 
 2868         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2869         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2870 
 2871         sc->xl_wdog_timer = 0;
 2872         callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc);
 2873 }
 2874 
 2875 /*
 2876  * Set media options.
 2877  */
 2878 static int
 2879 xl_ifmedia_upd(struct ifnet *ifp)
 2880 {
 2881         struct xl_softc         *sc = ifp->if_softc;
 2882         struct ifmedia          *ifm = NULL;
 2883         struct mii_data         *mii = NULL;
 2884 
 2885         XL_LOCK(sc);
 2886 
 2887         if (sc->xl_miibus != NULL)
 2888                 mii = device_get_softc(sc->xl_miibus);
 2889         if (mii == NULL)
 2890                 ifm = &sc->ifmedia;
 2891         else
 2892                 ifm = &mii->mii_media;
 2893 
 2894         switch (IFM_SUBTYPE(ifm->ifm_media)) {
 2895         case IFM_100_FX:
 2896         case IFM_10_FL:
 2897         case IFM_10_2:
 2898         case IFM_10_5:
 2899                 xl_setmode(sc, ifm->ifm_media);
 2900                 XL_UNLOCK(sc);
 2901                 return (0);
 2902         }
 2903 
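              /*
               * For MII/autoneg-capable media the actual media change is
               * performed by mii_mediachg(), which xl_init_locked() calls
               * near the end of initialization; clearing IFF_DRV_RUNNING
               * first forces that reinitialization to run.  All other
               * media are programmed directly via xl_setmode().
               */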
 2904         if (sc->xl_media & XL_MEDIAOPT_MII ||
 2905             sc->xl_media & XL_MEDIAOPT_BTX ||
 2906             sc->xl_media & XL_MEDIAOPT_BT4) {
 2907                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2908                 xl_init_locked(sc);
 2909         } else {
 2910                 xl_setmode(sc, ifm->ifm_media);
 2911         }
 2912 
 2913         XL_UNLOCK(sc);
 2914 
 2915         return (0);
 2916 }
 2917 
 2918 /*
 2919  * Report current media status.
 2920  */
 2921 static void
 2922 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 2923 {
 2924         struct xl_softc         *sc = ifp->if_softc;
 2925         u_int32_t               icfg;
 2926         u_int16_t               status = 0;
 2927         struct mii_data         *mii = NULL;
 2928 
 2929         XL_LOCK(sc);
 2930 
 2931         if (sc->xl_miibus != NULL)
 2932                 mii = device_get_softc(sc->xl_miibus);
 2933 
 2934         XL_SEL_WIN(4);
 2935         status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
 2936 
 2937         XL_SEL_WIN(3);
 2938         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
 2939         icfg >>= XL_ICFG_CONNECTOR_BITS;
 2940 
 2941         ifmr->ifm_active = IFM_ETHER;
 2942         ifmr->ifm_status = IFM_AVALID;
 2943 
 2944         if ((status & XL_MEDIASTAT_CARRIER) == 0)
 2945                 ifmr->ifm_status |= IFM_ACTIVE;
 2946 
 2947         switch (icfg) {
 2948         case XL_XCVR_10BT:
 2949                 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
 2950                 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
 2951                         ifmr->ifm_active |= IFM_FDX;
 2952                 else
 2953                         ifmr->ifm_active |= IFM_HDX;
 2954                 break;
 2955         case XL_XCVR_AUI:
 2956                 if (sc->xl_type == XL_TYPE_905B &&
 2957                     sc->xl_media == XL_MEDIAOPT_10FL) {
 2958                         ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
 2959                         if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
 2960                                 ifmr->ifm_active |= IFM_FDX;
 2961                         else
 2962                                 ifmr->ifm_active |= IFM_HDX;
 2963                 } else
 2964                         ifmr->ifm_active = IFM_ETHER|IFM_10_5;
 2965                 break;
 2966         case XL_XCVR_COAX:
 2967                 ifmr->ifm_active = IFM_ETHER|IFM_10_2;
 2968                 break;
 2969         /*
 2970          * XXX MII and BTX/AUTO should be separate cases.
 2971          */
 2972 
 2973         case XL_XCVR_100BTX:
 2974         case XL_XCVR_AUTO:
 2975         case XL_XCVR_MII:
 2976                 if (mii != NULL) {
 2977                         mii_pollstat(mii);
 2978                         ifmr->ifm_active = mii->mii_media_active;
 2979                         ifmr->ifm_status = mii->mii_media_status;
 2980                 }
 2981                 break;
 2982         case XL_XCVR_100BFX:
 2983                 ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
 2984                 break;
 2985         default:
 2986                 if_printf(ifp, "unknown XCVR type: %d\n", icfg);
 2987                 break;
 2988         }
 2989 
 2990         XL_UNLOCK(sc);
 2991 }
 2992 
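      /*
       * Handle interface ioctls: interface flag changes (promiscuous and
       * allmulti toggles only reprogram the RX filter, anything else
       * reinitializes), multicast list updates, media requests, and
       * capability changes (polling, TX/RX checksum offload and magic
       * packet WOL).  Everything else is passed to ether_ioctl().
       */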
 2993 static int
 2994 xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 2995 {
 2996         struct xl_softc         *sc = ifp->if_softc;
 2997         struct ifreq            *ifr = (struct ifreq *) data;
 2998         int                     error = 0, mask;
 2999         struct mii_data         *mii = NULL;
 3000 
 3001         switch (command) {
 3002         case SIOCSIFFLAGS:
 3003                 XL_LOCK(sc);
 3004                 if (ifp->if_flags & IFF_UP) {
 3005                         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3006                             (ifp->if_flags ^ sc->xl_if_flags) &
 3007                             (IFF_PROMISC | IFF_ALLMULTI))
 3008                                 xl_rxfilter(sc);
 3009                         else
 3010                                 xl_init_locked(sc);
 3011                 } else {
 3012                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3013                                 xl_stop(sc);
 3014                 }
 3015                 sc->xl_if_flags = ifp->if_flags;
 3016                 XL_UNLOCK(sc);
 3017                 break;
 3018         case SIOCADDMULTI:
 3019         case SIOCDELMULTI:
 3020                 /* XXX Downcall from if_addmulti() possibly with locks held. */
 3021                 XL_LOCK(sc);
 3022                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3023                         xl_rxfilter(sc);
 3024                 XL_UNLOCK(sc);
 3025                 break;
 3026         case SIOCGIFMEDIA:
 3027         case SIOCSIFMEDIA:
 3028                 if (sc->xl_miibus != NULL)
 3029                         mii = device_get_softc(sc->xl_miibus);
 3030                 if (mii == NULL)
 3031                         error = ifmedia_ioctl(ifp, ifr,
 3032                             &sc->ifmedia, command);
 3033                 else
 3034                         error = ifmedia_ioctl(ifp, ifr,
 3035                             &mii->mii_media, command);
 3036                 break;
 3037         case SIOCSIFCAP:
 3038                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 3039 #ifdef DEVICE_POLLING
 3040                 if ((mask & IFCAP_POLLING) != 0 &&
 3041                     (ifp->if_capabilities & IFCAP_POLLING) != 0) {
 3042                         ifp->if_capenable ^= IFCAP_POLLING;
 3043                         if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
 3044                                 error = ether_poll_register(xl_poll, ifp);
 3045                                 if (error)
 3046                                         break;
 3047                                 XL_LOCK(sc);
 3048                                 /* Disable interrupts */
 3049                                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 3050                                 ifp->if_capenable |= IFCAP_POLLING;
 3051                                 XL_UNLOCK(sc);
 3052                         } else {
 3053                                 error = ether_poll_deregister(ifp);
 3054                                 /* Enable interrupts. */
 3055                                 XL_LOCK(sc);
 3056                                 CSR_WRITE_2(sc, XL_COMMAND,
 3057                                     XL_CMD_INTR_ACK | 0xFF);
 3058                                 CSR_WRITE_2(sc, XL_COMMAND,
 3059                                     XL_CMD_INTR_ENB | XL_INTRS);
 3060                                 if (sc->xl_flags & XL_FLAG_FUNCREG)
 3061                                         bus_space_write_4(sc->xl_ftag,
 3062                                             sc->xl_fhandle, 4, 0x8000);
 3063                                 XL_UNLOCK(sc);
 3064                         }
 3065                 }
 3066 #endif /* DEVICE_POLLING */
 3067                 XL_LOCK(sc);
 3068                 if ((mask & IFCAP_TXCSUM) != 0 &&
 3069                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
 3070                         ifp->if_capenable ^= IFCAP_TXCSUM;
 3071                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 3072                                 ifp->if_hwassist |= XL905B_CSUM_FEATURES;
 3073                         else
 3074                                 ifp->if_hwassist &= ~XL905B_CSUM_FEATURES;
 3075                 }
 3076                 if ((mask & IFCAP_RXCSUM) != 0 &&
 3077                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
 3078                         ifp->if_capenable ^= IFCAP_RXCSUM;
 3079                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 3080                     (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
 3081                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 3082                 XL_UNLOCK(sc);
 3083                 break;
 3084         default:
 3085                 error = ether_ioctl(ifp, command, data);
 3086                 break;
 3087         }
 3088 
 3089         return (error);
 3090 }
 3091 
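      /*
       * Per-second watchdog, run with the driver lock held (presumably
       * from the xl_tick callout).  If the transmit watchdog timer
       * expires, first reap completed descriptors; if that accounts for
       * all outstanding transmissions the timeout is attributed to a
       * missed Tx interrupt and the routine simply recovers.  Otherwise
       * the error counter is bumped and the interface is reinitialized,
       * in which case EJUSTRETURN is returned to the caller.
       */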
 3092 static int
 3093 xl_watchdog(struct xl_softc *sc)
 3094 {
 3095         struct ifnet            *ifp = sc->xl_ifp;
 3096         u_int16_t               status = 0;
 3097         int                     misintr;
 3098 
 3099         XL_LOCK_ASSERT(sc);
 3100 
 3101         if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0)
 3102                 return (0);
 3103 
 3104         xl_rxeof(sc);
 3105         xl_txeoc(sc);
 3106         misintr = 0;
 3107         if (sc->xl_type == XL_TYPE_905B) {
 3108                 xl_txeof_90xB(sc);
 3109                 if (sc->xl_cdata.xl_tx_cnt == 0)
 3110                         misintr++;
 3111         } else {
 3112                 xl_txeof(sc);
 3113                 if (sc->xl_cdata.xl_tx_head == NULL)
 3114                         misintr++;
 3115         }
 3116         if (misintr != 0) {
 3117                 device_printf(sc->xl_dev,
 3118                     "watchdog timeout (missed Tx interrupts) -- recovering\n");
 3119                 return (0);
 3120         }
 3121 
 3122         ifp->if_oerrors++;
 3123         XL_SEL_WIN(4);
 3124         status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
 3125         device_printf(sc->xl_dev, "watchdog timeout\n");
 3126 
 3127         if (status & XL_MEDIASTAT_CARRIER)
 3128                 device_printf(sc->xl_dev,
 3129                     "no carrier - transceiver cable problem?\n");
 3130 
 3131         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3132         xl_init_locked(sc);
 3133 
 3134         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 3135                 if (sc->xl_type == XL_TYPE_905B)
 3136                         xl_start_90xB_locked(ifp);
 3137                 else
 3138                         xl_start_locked(ifp);
 3139         }
 3140 
 3141         return (EJUSTRETURN);
 3142 }
 3143 
 3144 /*
 3145  * Stop the adapter and free any mbufs allocated to the
 3146  * RX and TX lists.
 3147  */
 3148 static void
 3149 xl_stop(struct xl_softc *sc)
 3150 {
 3151         register int            i;
 3152         struct ifnet            *ifp = sc->xl_ifp;
 3153 
 3154         XL_LOCK_ASSERT(sc);
 3155 
 3156         sc->xl_wdog_timer = 0;
 3157 
 3158         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
 3159         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
 3160         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
 3161         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
 3162         xl_wait(sc);
 3163         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
 3164         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
 3165         DELAY(800);
 3166 
 3167 #ifdef foo
 3168         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 3169         xl_wait(sc);
 3170         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 3171         xl_wait(sc);
 3172 #endif
 3173 
 3174         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
 3175         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
 3176         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 3177         if (sc->xl_flags & XL_FLAG_FUNCREG)
 3178                 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
 3179 
 3180         /* Stop the stats updater. */
 3181         callout_stop(&sc->xl_tick_callout);
 3182 
 3183         /*
 3184          * Free data in the RX lists.
 3185          */
 3186         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 3187                 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
 3188                         bus_dmamap_unload(sc->xl_mtag,
 3189                             sc->xl_cdata.xl_rx_chain[i].xl_map);
 3190                         bus_dmamap_destroy(sc->xl_mtag,
 3191                             sc->xl_cdata.xl_rx_chain[i].xl_map);
 3192                         m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
 3193                         sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
 3194                 }
 3195         }
 3196         if (sc->xl_ldata.xl_rx_list != NULL)
 3197                 bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
 3198         /*
 3199          * Free the TX list buffers.
 3200          */
 3201         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 3202                 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
 3203                         bus_dmamap_unload(sc->xl_mtag,
 3204                             sc->xl_cdata.xl_tx_chain[i].xl_map);
 3205                         bus_dmamap_destroy(sc->xl_mtag,
 3206                             sc->xl_cdata.xl_tx_chain[i].xl_map);
 3207                         m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
 3208                         sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
 3209                 }
 3210         }
 3211         if (sc->xl_ldata.xl_tx_list != NULL)
 3212                 bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);
 3213 
 3214         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 3215 }
 3216 
 3217 /*
 3218  * Stop all chip I/O so that the kernel's probe routines don't
 3219  * get confused by errant DMAs when rebooting.
 3220  */
 3221 static int
 3222 xl_shutdown(device_t dev)
 3223 {
 3224 
 3225         return (xl_suspend(dev));
 3226 }
 3227 
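      /*
       * Suspend: stop the chip and arm wake-on-LAN (when supported) before
       * the device is powered down.  Also used by xl_shutdown().
       */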
 3228 static int
 3229 xl_suspend(device_t dev)
 3230 {
 3231         struct xl_softc         *sc;
 3232 
 3233         sc = device_get_softc(dev);
 3234 
 3235         XL_LOCK(sc);
 3236         xl_stop(sc);
 3237         xl_setwol(sc);
 3238         XL_UNLOCK(sc);
 3239 
 3240         return (0);
 3241 }
 3242 
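      /*
       * Resume: if the interface was up, clear IFF_DRV_RUNNING and run the
       * full initialization again so the freshly powered-up chip is
       * reprogrammed from scratch.
       */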
 3243 static int
 3244 xl_resume(device_t dev)
 3245 {
 3246         struct xl_softc         *sc;
 3247         struct ifnet            *ifp;
 3248 
 3249         sc = device_get_softc(dev);
 3250         ifp = sc->xl_ifp;
 3251 
 3252         XL_LOCK(sc);
 3253 
 3254         if (ifp->if_flags & IFF_UP) {
 3255                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3256                 xl_init_locked(sc);
 3257         }
 3258 
 3259         XL_UNLOCK(sc);
 3260 
 3261         return (0);
 3262 }
 3263 
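      /*
       * Program wake-on-LAN before suspend: acknowledge any stale PME
       * event, enable magic-packet matching in the window 7 BM_PME
       * register when IFCAP_WOL_MAGIC is enabled, keep the receiver
       * running so the chip can see the magic frame, and set (or clear)
       * PME_ENABLE in the PCI power-management status register.
       */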
 3264 static void
 3265 xl_setwol(struct xl_softc *sc)
 3266 {
 3267         struct ifnet            *ifp;
 3268         u_int16_t               cfg, pmstat;
 3269 
 3270         if ((sc->xl_flags & XL_FLAG_WOL) == 0)
 3271                 return;
 3272 
 3273         ifp = sc->xl_ifp;
 3274         XL_SEL_WIN(7);
 3275         /* Clear any pending PME events. */
 3276         CSR_READ_2(sc, XL_W7_BM_PME);
 3277         cfg = 0;
 3278         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 3279                 cfg |= XL_BM_PME_MAGIC;
 3280         CSR_WRITE_2(sc, XL_W7_BM_PME, cfg);
 3281         /* Enable RX. */
 3282         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 3283                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
 3284         /* Request PME. */
 3285         pmstat = pci_read_config(sc->xl_dev,
 3286             sc->xl_pmcap + PCIR_POWER_STATUS, 2);
 3287         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 3288                 pmstat |= PCIM_PSTAT_PMEENABLE;
 3289         else
 3290                 pmstat &= ~PCIM_PSTAT_PMEENABLE;
 3291         pci_write_config(sc->xl_dev,
 3292             sc->xl_pmcap + PCIR_POWER_STATUS, pmstat, 2);
 3293 }
