FreeBSD/Linux Kernel Cross Reference
sys/dev/xl/if_xl.c

    1 /*-
    2  * Copyright (c) 1997, 1998, 1999
    3  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Bill Paul.
   16  * 4. Neither the name of the author nor the names of any co-contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   30  * THE POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD$");
   35 
   36 /*
   37  * 3Com 3c90x Etherlink XL PCI NIC driver
   38  *
    39  * Supports the 3Com "boomerang", "cyclone", "hurricane" and "tornado"
    40  * PCI bus-master chips (3c90x cards and embedded controllers) including
   41  * the following:
   42  *
   43  * 3Com 3c900-TPO       10Mbps/RJ-45
   44  * 3Com 3c900-COMBO     10Mbps/RJ-45,AUI,BNC
   45  * 3Com 3c905-TX        10/100Mbps/RJ-45
   46  * 3Com 3c905-T4        10/100Mbps/RJ-45
   47  * 3Com 3c900B-TPO      10Mbps/RJ-45
   48  * 3Com 3c900B-COMBO    10Mbps/RJ-45,AUI,BNC
   49  * 3Com 3c900B-TPC      10Mbps/RJ-45,BNC
   50  * 3Com 3c900B-FL       10Mbps/Fiber-optic
   51  * 3Com 3c905B-COMBO    10/100Mbps/RJ-45,AUI,BNC
   52  * 3Com 3c905B-TX       10/100Mbps/RJ-45
   53  * 3Com 3c905B-FL/FX    10/100Mbps/Fiber-optic
   54  * 3Com 3c905C-TX       10/100Mbps/RJ-45 (Tornado ASIC)
   55  * 3Com 3c980-TX        10/100Mbps server adapter (Hurricane ASIC)
   56  * 3Com 3c980C-TX       10/100Mbps server adapter (Tornado ASIC)
   57  * 3Com 3cSOHO100-TX    10/100Mbps/RJ-45 (Hurricane ASIC)
   58  * 3Com 3c450-TX        10/100Mbps/RJ-45 (Tornado ASIC)
   59  * 3Com 3c555           10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
   60  * 3Com 3c556           10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
   61  * 3Com 3c556B          10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
   62  * 3Com 3c575TX         10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   63  * 3Com 3c575B          10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   64  * 3Com 3c575C          10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   65  * 3Com 3cxfem656       10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   66  * 3Com 3cxfem656b      10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   67  * 3Com 3cxfem656c      10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
   68  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
   69  * Dell on-board 3c920 10/100Mbps/RJ-45
   70  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
   71  * Dell Latitude laptop docking station embedded 3c905-TX
   72  *
   73  * Written by Bill Paul <wpaul@ctr.columbia.edu>
   74  * Electrical Engineering Department
   75  * Columbia University, New York City
   76  */
   77 /*
    78  * The 3c90x series chips use a bus-master DMA interface for transferring
    79  * packets to and from the controller chip. Some of the "vortex" cards
    80  * (3c59x) also supported a bus-master mode; however, for those chips
   81  * you could only DMA packets to/from a contiguous memory buffer. For
   82  * transmission this would mean copying the contents of the queued mbuf
   83  * chain into an mbuf cluster and then DMAing the cluster. This extra
   84  * copy would sort of defeat the purpose of the bus master support for
   85  * any packet that doesn't fit into a single mbuf.
   86  *
   87  * By contrast, the 3c90x cards support a fragment-based bus master
   88  * mode where mbuf chains can be encapsulated using TX descriptors.
   89  * This is similar to other PCI chips such as the Texas Instruments
   90  * ThunderLAN and the Intel 82557/82558.
   91  *
   92  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
   93  * bus master chips because they maintain the old PIO interface for
   94  * backwards compatibility, but starting with the 3c905B and the
   95  * "cyclone" chips, the compatibility interface has been dropped.
    96  * Since using bus-master DMA is a big win, we use this driver to
    97  * support the PCI "boomerang" chips as well, even though they also
    98  * work with the "vortex" driver, in order to obtain better performance.
   99  */
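       /*
        * Illustrative sketch (not part of the driver): with fragment-based
        * descriptors each mbuf in a chain is pointed to directly rather than
        * being copied into one contiguous cluster first.  Assuming a TX
        * descriptor that carries an array of address/length fragment entries
        * (along the lines of the fragment array declared in if_xlreg.h), and
        * with dma_address_of() and LAST_FRAG standing in as hypothetical
        * names, the encapsulation step reduces to roughly:
        *
        *      for (m = m_head, i = 0; m != NULL; m = m->m_next) {
        *              if (m->m_len == 0)
        *                      continue;
        *              d->frag[i].addr = dma_address_of(m);
        *              d->frag[i].len = m->m_len;
        *              i++;
        *      }
        *      d->frag[i - 1].len |= LAST_FRAG;
        *
        * The real work is done by xl_encap() (declared below), which obtains
        * the physical segment addresses through busdma before filling in the
        * descriptor.
        */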
  100 
  101 #ifdef HAVE_KERNEL_OPTION_HEADERS
  102 #include "opt_device_polling.h"
  103 #endif
  104 
  105 #include <sys/param.h>
  106 #include <sys/systm.h>
  107 #include <sys/sockio.h>
  108 #include <sys/endian.h>
  109 #include <sys/mbuf.h>
  110 #include <sys/kernel.h>
  111 #include <sys/module.h>
  112 #include <sys/socket.h>
  113 #include <sys/taskqueue.h>
  114 
  115 #include <net/if.h>
  116 #include <net/if_arp.h>
  117 #include <net/ethernet.h>
  118 #include <net/if_dl.h>
  119 #include <net/if_media.h>
  120 #include <net/if_types.h>
  121 
  122 #include <net/bpf.h>
  123 
  124 #include <machine/bus.h>
  125 #include <machine/resource.h>
  126 #include <sys/bus.h>
  127 #include <sys/rman.h>
  128 
  129 #include <dev/mii/mii.h>
  130 #include <dev/mii/mii_bitbang.h>
  131 #include <dev/mii/miivar.h>
  132 
  133 #include <dev/pci/pcireg.h>
  134 #include <dev/pci/pcivar.h>
  135 
  136 MODULE_DEPEND(xl, pci, 1, 1, 1);
  137 MODULE_DEPEND(xl, ether, 1, 1, 1);
  138 MODULE_DEPEND(xl, miibus, 1, 1, 1);
  139 
  140 /* "device miibus" required.  See GENERIC if you get errors here. */
  141 #include "miibus_if.h"
  142 
  143 #include <dev/xl/if_xlreg.h>
  144 
  145 /*
  146  * TX Checksumming is disabled by default for two reasons:
  147  * - TX Checksumming will occasionally produce corrupt packets
  148  * - TX Checksumming seems to reduce performance
  149  *
   150  * Only 905B/C cards were reported to have this problem; it is possible
  151  * that later chips _may_ be immune.
  152  */
  153 #define XL905B_TXCSUM_BROKEN    1
  154 
  155 #ifdef XL905B_TXCSUM_BROKEN
  156 #define XL905B_CSUM_FEATURES    0
  157 #else
  158 #define XL905B_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
  159 #endif
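       /*
        * These checksum flags are copied into if_hwassist in xl_attach()
        * below, so with XL905B_TXCSUM_BROKEN defined the 905B parts
        * advertise receive checksum offload only.
        */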
  160 
  161 /*
  162  * Various supported device vendors/types and their names.
  163  */
  164 static const struct xl_type xl_devs[] = {
  165         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
  166                 "3Com 3c900-TPO Etherlink XL" },
  167         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
  168                 "3Com 3c900-COMBO Etherlink XL" },
  169         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
  170                 "3Com 3c905-TX Fast Etherlink XL" },
  171         { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
  172                 "3Com 3c905-T4 Fast Etherlink XL" },
  173         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
  174                 "3Com 3c900B-TPO Etherlink XL" },
  175         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
  176                 "3Com 3c900B-COMBO Etherlink XL" },
  177         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
  178                 "3Com 3c900B-TPC Etherlink XL" },
  179         { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
  180                 "3Com 3c900B-FL Etherlink XL" },
  181         { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
  182                 "3Com 3c905B-TX Fast Etherlink XL" },
  183         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
  184                 "3Com 3c905B-T4 Fast Etherlink XL" },
  185         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
  186                 "3Com 3c905B-FX/SC Fast Etherlink XL" },
  187         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
  188                 "3Com 3c905B-COMBO Fast Etherlink XL" },
  189         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
  190                 "3Com 3c905C-TX Fast Etherlink XL" },
  191         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
  192                 "3Com 3c920B-EMB Integrated Fast Etherlink XL" },
  193         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM,
  194                 "3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" },
  195         { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
  196                 "3Com 3c980 Fast Etherlink XL" },
  197         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
  198                 "3Com 3c980C Fast Etherlink XL" },
  199         { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
  200                 "3Com 3cSOHO100-TX OfficeConnect" },
  201         { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
  202                 "3Com 3c450-TX HomeConnect" },
  203         { TC_VENDORID, TC_DEVICEID_HURRICANE_555,
  204                 "3Com 3c555 Fast Etherlink XL" },
  205         { TC_VENDORID, TC_DEVICEID_HURRICANE_556,
  206                 "3Com 3c556 Fast Etherlink XL" },
  207         { TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
  208                 "3Com 3c556B Fast Etherlink XL" },
  209         { TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
  210                 "3Com 3c575TX Fast Etherlink XL" },
  211         { TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
  212                 "3Com 3c575B Fast Etherlink XL" },
  213         { TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
  214                 "3Com 3c575C Fast Etherlink XL" },
  215         { TC_VENDORID, TC_DEVICEID_HURRICANE_656,
  216                 "3Com 3c656 Fast Etherlink XL" },
  217         { TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
  218                 "3Com 3c656B Fast Etherlink XL" },
  219         { TC_VENDORID, TC_DEVICEID_TORNADO_656C,
  220                 "3Com 3c656C Fast Etherlink XL" },
  221         { 0, 0, NULL }
  222 };
  223 
  224 static int xl_probe(device_t);
  225 static int xl_attach(device_t);
  226 static int xl_detach(device_t);
  227 
  228 static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
  229 static void xl_tick(void *);
  230 static void xl_stats_update(struct xl_softc *);
  231 static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf **);
  232 static int xl_rxeof(struct xl_softc *);
  233 static void xl_rxeof_task(void *, int);
  234 static int xl_rx_resync(struct xl_softc *);
  235 static void xl_txeof(struct xl_softc *);
  236 static void xl_txeof_90xB(struct xl_softc *);
  237 static void xl_txeoc(struct xl_softc *);
  238 static void xl_intr(void *);
  239 static void xl_start(struct ifnet *);
  240 static void xl_start_locked(struct ifnet *);
  241 static void xl_start_90xB_locked(struct ifnet *);
  242 static int xl_ioctl(struct ifnet *, u_long, caddr_t);
  243 static void xl_init(void *);
  244 static void xl_init_locked(struct xl_softc *);
  245 static void xl_stop(struct xl_softc *);
  246 static int xl_watchdog(struct xl_softc *);
  247 static int xl_shutdown(device_t);
  248 static int xl_suspend(device_t);
  249 static int xl_resume(device_t);
  250 static void xl_setwol(struct xl_softc *);
  251 
  252 #ifdef DEVICE_POLLING
  253 static int xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
  254 static int xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
  255 #endif
  256 
  257 static int xl_ifmedia_upd(struct ifnet *);
  258 static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  259 
  260 static int xl_eeprom_wait(struct xl_softc *);
  261 static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
  262 
  263 static void xl_rxfilter(struct xl_softc *);
  264 static void xl_rxfilter_90x(struct xl_softc *);
  265 static void xl_rxfilter_90xB(struct xl_softc *);
  266 static void xl_setcfg(struct xl_softc *);
  267 static void xl_setmode(struct xl_softc *, int);
  268 static void xl_reset(struct xl_softc *);
  269 static int xl_list_rx_init(struct xl_softc *);
  270 static int xl_list_tx_init(struct xl_softc *);
  271 static int xl_list_tx_init_90xB(struct xl_softc *);
  272 static void xl_wait(struct xl_softc *);
  273 static void xl_mediacheck(struct xl_softc *);
  274 static void xl_choose_media(struct xl_softc *sc, int *media);
  275 static void xl_choose_xcvr(struct xl_softc *, int);
  276 static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int);
  277 #ifdef notdef
  278 static void xl_testpacket(struct xl_softc *);
  279 #endif
  280 
  281 static int xl_miibus_readreg(device_t, int, int);
  282 static int xl_miibus_writereg(device_t, int, int, int);
  283 static void xl_miibus_statchg(device_t);
  284 static void xl_miibus_mediainit(device_t);
  285 
  286 /*
  287  * MII bit-bang glue
  288  */
  289 static uint32_t xl_mii_bitbang_read(device_t);
  290 static void xl_mii_bitbang_write(device_t, uint32_t);
  291 
  292 static const struct mii_bitbang_ops xl_mii_bitbang_ops = {
  293         xl_mii_bitbang_read,
  294         xl_mii_bitbang_write,
  295         {
  296                 XL_MII_DATA,            /* MII_BIT_MDO */
  297                 XL_MII_DATA,            /* MII_BIT_MDI */
  298                 XL_MII_CLK,             /* MII_BIT_MDC */
  299                 XL_MII_DIR,             /* MII_BIT_DIR_HOST_PHY */
  300                 0,                      /* MII_BIT_DIR_PHY_HOST */
  301         }
  302 };
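       /*
        * The shared MII bit-bang code (dev/mii/mii_bitbang.c) implements
        * the MDIO protocol itself; all it needs from this driver are the
        * two accessors above for the PHY management register and the table
        * of bit masks telling it which bits act as MDIO output, MDIO input,
        * clock and direction control.  xl_miibus_readreg() and
        * xl_miibus_writereg() below just select register window 4 and hand
        * off to mii_bitbang_readreg()/mii_bitbang_writereg().
        */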
  303 
  304 static device_method_t xl_methods[] = {
  305         /* Device interface */
  306         DEVMETHOD(device_probe,         xl_probe),
  307         DEVMETHOD(device_attach,        xl_attach),
  308         DEVMETHOD(device_detach,        xl_detach),
  309         DEVMETHOD(device_shutdown,      xl_shutdown),
  310         DEVMETHOD(device_suspend,       xl_suspend),
  311         DEVMETHOD(device_resume,        xl_resume),
  312 
  313         /* MII interface */
  314         DEVMETHOD(miibus_readreg,       xl_miibus_readreg),
  315         DEVMETHOD(miibus_writereg,      xl_miibus_writereg),
  316         DEVMETHOD(miibus_statchg,       xl_miibus_statchg),
  317         DEVMETHOD(miibus_mediainit,     xl_miibus_mediainit),
  318 
  319         DEVMETHOD_END
  320 };
  321 
  322 static driver_t xl_driver = {
  323         "xl",
  324         xl_methods,
  325         sizeof(struct xl_softc)
  326 };
  327 
  328 static devclass_t xl_devclass;
  329 
  330 DRIVER_MODULE_ORDERED(xl, pci, xl_driver, xl_devclass, NULL, NULL,
  331     SI_ORDER_ANY);
  332 DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, NULL, NULL);
  333 
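       /*
        * bus_dmamap_load() callback: record the bus address of the single
        * segment backing a descriptor ring so the caller can later program
        * it into the chip.  The segment count and error argument are not
        * examined here; the ring tags are created with exactly one segment
        * and the caller checks the return value of bus_dmamap_load() itself.
        */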
  334 static void
  335 xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  336 {
  337         u_int32_t *paddr;
  338 
  339         paddr = arg;
  340         *paddr = segs->ds_addr;
  341 }
  342 
  343 /*
  344  * Murphy's law says that it's possible the chip can wedge and
  345  * the 'command in progress' bit may never clear. Hence, we wait
  346  * only a finite amount of time to avoid getting caught in an
  347  * infinite loop. Normally this delay routine would be a macro,
  348  * but it isn't called during normal operation so we can afford
   349  * to make it a function.  The warning is suppressed if the card is gone.
  350  */
  351 static void
  352 xl_wait(struct xl_softc *sc)
  353 {
  354         register int            i;
  355 
  356         for (i = 0; i < XL_TIMEOUT; i++) {
  357                 if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0)
  358                         break;
  359         }
  360 
  361         if (i == XL_TIMEOUT && bus_child_present(sc->xl_dev))
  362                 device_printf(sc->xl_dev, "command never completed!\n");
  363 }
  364 
  365 /*
  366  * MII access routines are provided for adapters with external
  367  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
  368  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
  369  * Note: if you don't perform the MDIO operations just right,
  370  * it's possible to end up with code that works correctly with
  371  * some chips/CPUs/processor speeds/bus speeds/etc but not
  372  * with others.
  373  */
  374 
  375 /*
  376  * Read the MII serial port for the MII bit-bang module.
  377  */
  378 static uint32_t
  379 xl_mii_bitbang_read(device_t dev)
  380 {
  381         struct xl_softc         *sc;
  382         uint32_t                val;
  383 
  384         sc = device_get_softc(dev);
  385 
  386         /* We're already in window 4. */
  387         val = CSR_READ_2(sc, XL_W4_PHY_MGMT);
  388         CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2,
  389             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  390 
  391         return (val);
  392 }
  393 
  394 /*
  395  * Write the MII serial port for the MII bit-bang module.
  396  */
  397 static void
  398 xl_mii_bitbang_write(device_t dev, uint32_t val)
  399 {
  400         struct xl_softc         *sc;
  401 
  402         sc = device_get_softc(dev);
  403 
  404         /* We're already in window 4. */
  405         CSR_WRITE_2(sc, XL_W4_PHY_MGMT, val);
  406         CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2,
  407             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  408 }
  409 
  410 static int
  411 xl_miibus_readreg(device_t dev, int phy, int reg)
  412 {
  413         struct xl_softc         *sc;
  414 
  415         sc = device_get_softc(dev);
  416 
   417         /* Select register window 4. */
  418         XL_SEL_WIN(4);
  419 
  420         return (mii_bitbang_readreg(dev, &xl_mii_bitbang_ops, phy, reg));
  421 }
  422 
  423 static int
  424 xl_miibus_writereg(device_t dev, int phy, int reg, int data)
  425 {
  426         struct xl_softc         *sc;
  427 
  428         sc = device_get_softc(dev);
  429 
   430         /* Select register window 4. */
  431         XL_SEL_WIN(4);
  432 
  433         mii_bitbang_writereg(dev, &xl_mii_bitbang_ops, phy, reg, data);
  434 
  435         return (0);
  436 }
  437 
  438 static void
  439 xl_miibus_statchg(device_t dev)
  440 {
  441         struct xl_softc         *sc;
  442         struct mii_data         *mii;
  443         uint8_t                 macctl;
  444 
  445         sc = device_get_softc(dev);
  446         mii = device_get_softc(sc->xl_miibus);
  447 
  448         xl_setcfg(sc);
  449 
  450         /* Set ASIC's duplex mode to match the PHY. */
  451         XL_SEL_WIN(3);
  452         macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
  453         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
  454                 macctl |= XL_MACCTRL_DUPLEX;
  455                 if (sc->xl_type == XL_TYPE_905B) {
  456                         if ((IFM_OPTIONS(mii->mii_media_active) &
  457                             IFM_ETH_RXPAUSE) != 0)
  458                                 macctl |= XL_MACCTRL_FLOW_CONTROL_ENB;
  459                         else
  460                                 macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB;
  461                 }
  462         } else {
  463                 macctl &= ~XL_MACCTRL_DUPLEX;
  464                 if (sc->xl_type == XL_TYPE_905B)
  465                         macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB;
  466         }
  467         CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
  468 }
  469 
  470 /*
  471  * Special support for the 3c905B-COMBO. This card has 10/100 support
   472  * plus BNC and AUI ports. This means we will have both a miibus attached
   473  * and some non-MII media settings. In order to allow this, we have to
  474  * add the extra media to the miibus's ifmedia struct, but we can't do
  475  * that during xl_attach() because the miibus hasn't been attached yet.
  476  * So instead, we wait until the miibus probe/attach is done, at which
   477  * point we will get a callback telling us that it's safe to add our
  478  * extra media.
  479  */
  480 static void
  481 xl_miibus_mediainit(device_t dev)
  482 {
  483         struct xl_softc         *sc;
  484         struct mii_data         *mii;
  485         struct ifmedia          *ifm;
  486 
  487         sc = device_get_softc(dev);
  488         mii = device_get_softc(sc->xl_miibus);
  489         ifm = &mii->mii_media;
  490 
  491         if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) {
  492                 /*
  493                  * Check for a 10baseFL board in disguise.
  494                  */
  495                 if (sc->xl_type == XL_TYPE_905B &&
  496                     sc->xl_media == XL_MEDIAOPT_10FL) {
  497                         if (bootverbose)
  498                                 device_printf(sc->xl_dev, "found 10baseFL\n");
  499                         ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL);
  500                         ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0,
  501                             NULL);
  502                         if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
  503                                 ifmedia_add(ifm,
  504                                     IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL);
  505                 } else {
  506                         if (bootverbose)
  507                                 device_printf(sc->xl_dev, "found AUI\n");
  508                         ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL);
  509                 }
  510         }
  511 
  512         if (sc->xl_media & XL_MEDIAOPT_BNC) {
  513                 if (bootverbose)
  514                         device_printf(sc->xl_dev, "found BNC\n");
  515                 ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL);
  516         }
  517 }
  518 
  519 /*
  520  * The EEPROM is slow: give it time to come ready after issuing
  521  * it a command.
  522  */
  523 static int
  524 xl_eeprom_wait(struct xl_softc *sc)
  525 {
  526         int                     i;
  527 
  528         for (i = 0; i < 100; i++) {
  529                 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
  530                         DELAY(162);
  531                 else
  532                         break;
  533         }
  534 
  535         if (i == 100) {
  536                 device_printf(sc->xl_dev, "eeprom failed to come ready\n");
  537                 return (1);
  538         }
  539 
  540         return (0);
  541 }
  542 
  543 /*
  544  * Read a sequence of words from the EEPROM. Note that ethernet address
  545  * data is stored in the EEPROM in network byte order.
  546  */
  547 static int
  548 xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
  549 {
  550         int                     err = 0, i;
  551         u_int16_t               word = 0, *ptr;
  552 
  553 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
  554 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
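       /*
        * These macros pack an EEPROM offset around the read-command opcode
        * bits: the low 6 bits of the offset stay in place and, for the wide
        * command format, the remaining offset bits are shifted up past the
        * opcode field into bits 8-14 of the command word.
        */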
  555         /*
  556          * XXX: WARNING! DANGER!
  557          * It's easy to accidentally overwrite the rom content!
  558          * Note: the 3c575 uses 8bit EEPROM offsets.
  559          */
  560         XL_SEL_WIN(0);
  561 
  562         if (xl_eeprom_wait(sc))
  563                 return (1);
  564 
  565         if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
  566                 off += 0x30;
  567 
  568         for (i = 0; i < cnt; i++) {
  569                 if (sc->xl_flags & XL_FLAG_8BITROM)
  570                         CSR_WRITE_2(sc, XL_W0_EE_CMD,
  571                             XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
  572                 else
  573                         CSR_WRITE_2(sc, XL_W0_EE_CMD,
  574                             XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
  575                 err = xl_eeprom_wait(sc);
  576                 if (err)
  577                         break;
  578                 word = CSR_READ_2(sc, XL_W0_EE_DATA);
  579                 ptr = (u_int16_t *)(dest + (i * 2));
  580                 if (swap)
  581                         *ptr = ntohs(word);
  582                 else
  583                         *ptr = word;
  584         }
  585 
  586         return (err ? 1 : 0);
  587 }
  588 
  589 static void
  590 xl_rxfilter(struct xl_softc *sc)
  591 {
  592 
  593         if (sc->xl_type == XL_TYPE_905B)
  594                 xl_rxfilter_90xB(sc);
  595         else
  596                 xl_rxfilter_90x(sc);
  597 }
  598 
  599 /*
  600  * NICs older than the 3c905B have only one multicast option, which
  601  * is to enable reception of all multicast frames.
  602  */
  603 static void
  604 xl_rxfilter_90x(struct xl_softc *sc)
  605 {
  606         struct ifnet            *ifp;
  607         struct ifmultiaddr      *ifma;
  608         u_int8_t                rxfilt;
  609 
  610         XL_LOCK_ASSERT(sc);
  611 
  612         ifp = sc->xl_ifp;
  613 
  614         XL_SEL_WIN(5);
  615         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
  616         rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
  617             XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);
  618 
  619         /* Set the individual bit to receive frames for this host only. */
  620         rxfilt |= XL_RXFILTER_INDIVIDUAL;
  621         /* Set capture broadcast bit to capture broadcast frames. */
  622         if (ifp->if_flags & IFF_BROADCAST)
  623                 rxfilt |= XL_RXFILTER_BROADCAST;
  624 
  625         /* If we want promiscuous mode, set the allframes bit. */
  626         if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
  627                 if (ifp->if_flags & IFF_PROMISC)
  628                         rxfilt |= XL_RXFILTER_ALLFRAMES;
  629                 if (ifp->if_flags & IFF_ALLMULTI)
  630                         rxfilt |= XL_RXFILTER_ALLMULTI;
  631         } else {
  632                 if_maddr_rlock(ifp);
  633                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  634                         if (ifma->ifma_addr->sa_family != AF_LINK)
  635                                 continue;
  636                         rxfilt |= XL_RXFILTER_ALLMULTI;
  637                         break;
  638                 }
  639                 if_maddr_runlock(ifp);
  640         }
  641 
  642         CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
  643         XL_SEL_WIN(7);
  644 }
  645 
  646 /*
  647  * 3c905B adapters have a hash filter that we can program.
  648  */
  649 static void
  650 xl_rxfilter_90xB(struct xl_softc *sc)
  651 {
  652         struct ifnet            *ifp;
  653         struct ifmultiaddr      *ifma;
  654         int                     i, mcnt;
  655         u_int16_t               h;
  656         u_int8_t                rxfilt;
  657 
  658         XL_LOCK_ASSERT(sc);
  659 
  660         ifp = sc->xl_ifp;
  661 
  662         XL_SEL_WIN(5);
  663         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
  664         rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
  665             XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
  666             XL_RXFILTER_MULTIHASH);
  667 
  668         /* Set the individual bit to receive frames for this host only. */
  669         rxfilt |= XL_RXFILTER_INDIVIDUAL;
  670         /* Set capture broadcast bit to capture broadcast frames. */
  671         if (ifp->if_flags & IFF_BROADCAST)
  672                 rxfilt |= XL_RXFILTER_BROADCAST;
  673 
  674         /* If we want promiscuous mode, set the allframes bit. */
  675         if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
  676                 if (ifp->if_flags & IFF_PROMISC)
  677                         rxfilt |= XL_RXFILTER_ALLFRAMES;
  678                 if (ifp->if_flags & IFF_ALLMULTI)
  679                         rxfilt |= XL_RXFILTER_ALLMULTI;
  680         } else {
  681                 /* First, zot all the existing hash bits. */
  682                 for (i = 0; i < XL_HASHFILT_SIZE; i++)
  683                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH | i);
  684 
  685                 /* Now program new ones. */
  686                 mcnt = 0;
  687                 if_maddr_rlock(ifp);
  688                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  689                         if (ifma->ifma_addr->sa_family != AF_LINK)
  690                                 continue;
  691                         /*
  692                          * Note: the 3c905B currently only supports a 64-bit
  693                          * hash table, which means we really only need 6 bits,
  694                          * but the manual indicates that future chip revisions
  695                          * will have a 256-bit hash table, hence the routine
  696                          * is set up to calculate 8 bits of position info in
  697                          * case we need it some day.
  698                          * Note II, The Sequel: _CURRENT_ versions of the
  699                          * 3c905B have a 256 bit hash table. This means we have
  700                          * to use all 8 bits regardless.  On older cards, the
  701                          * upper 2 bits will be ignored. Grrrr....
  702                          */
  703                         h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  704                             ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
  705                         CSR_WRITE_2(sc, XL_COMMAND,
  706                             h | XL_CMD_RX_SET_HASH | XL_HASH_SET);
  707                         mcnt++;
  708                 }
  709                 if_maddr_runlock(ifp);
  710                 if (mcnt > 0)
  711                         rxfilt |= XL_RXFILTER_MULTIHASH;
  712         }
  713 
  714         CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
  715         XL_SEL_WIN(7);
  716 }
  717 
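       /*
        * Program the connector field of the chip's internal config register
        * to match the available media: MII and 100baseT4 media select the
        * MII transceiver, 10/100baseTX media select the autoselect
        * transceiver, and the coax transceiver is stopped since it is not
        * used in either case.
        */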
  718 static void
  719 xl_setcfg(struct xl_softc *sc)
  720 {
  721         u_int32_t               icfg;
  722 
  723         /*XL_LOCK_ASSERT(sc);*/
  724 
  725         XL_SEL_WIN(3);
  726         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
  727         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  728         if (sc->xl_media & XL_MEDIAOPT_MII ||
  729                 sc->xl_media & XL_MEDIAOPT_BT4)
  730                 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
  731         if (sc->xl_media & XL_MEDIAOPT_BTX)
  732                 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
  733 
  734         CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
  735         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
  736 }
  737 
  738 static void
  739 xl_setmode(struct xl_softc *sc, int media)
  740 {
  741         u_int32_t               icfg;
  742         u_int16_t               mediastat;
  743         char                    *pmsg = "", *dmsg = "";
  744 
  745         XL_LOCK_ASSERT(sc);
  746 
  747         XL_SEL_WIN(4);
  748         mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
  749         XL_SEL_WIN(3);
  750         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
  751 
  752         if (sc->xl_media & XL_MEDIAOPT_BT) {
  753                 if (IFM_SUBTYPE(media) == IFM_10_T) {
  754                         pmsg = "10baseT transceiver";
  755                         sc->xl_xcvr = XL_XCVR_10BT;
  756                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  757                         icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
  758                         mediastat |= XL_MEDIASTAT_LINKBEAT |
  759                             XL_MEDIASTAT_JABGUARD;
  760                         mediastat &= ~XL_MEDIASTAT_SQEENB;
  761                 }
  762         }
  763 
  764         if (sc->xl_media & XL_MEDIAOPT_BFX) {
  765                 if (IFM_SUBTYPE(media) == IFM_100_FX) {
  766                         pmsg = "100baseFX port";
  767                         sc->xl_xcvr = XL_XCVR_100BFX;
  768                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  769                         icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
  770                         mediastat |= XL_MEDIASTAT_LINKBEAT;
  771                         mediastat &= ~XL_MEDIASTAT_SQEENB;
  772                 }
  773         }
  774 
  775         if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
  776                 if (IFM_SUBTYPE(media) == IFM_10_5) {
  777                         pmsg = "AUI port";
  778                         sc->xl_xcvr = XL_XCVR_AUI;
  779                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  780                         icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
  781                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  782                             XL_MEDIASTAT_JABGUARD);
   783                         mediastat |= XL_MEDIASTAT_SQEENB;
  784                 }
  785                 if (IFM_SUBTYPE(media) == IFM_10_FL) {
  786                         pmsg = "10baseFL transceiver";
  787                         sc->xl_xcvr = XL_XCVR_AUI;
  788                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  789                         icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
  790                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  791                             XL_MEDIASTAT_JABGUARD);
   792                         mediastat |= XL_MEDIASTAT_SQEENB;
  793                 }
  794         }
  795 
  796         if (sc->xl_media & XL_MEDIAOPT_BNC) {
  797                 if (IFM_SUBTYPE(media) == IFM_10_2) {
   798                         pmsg = "BNC port";
  799                         sc->xl_xcvr = XL_XCVR_COAX;
  800                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  801                         icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
  802                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  803                             XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB);
  804                 }
  805         }
  806 
  807         if ((media & IFM_GMASK) == IFM_FDX ||
  808                         IFM_SUBTYPE(media) == IFM_100_FX) {
  809                 dmsg = "full";
  810                 XL_SEL_WIN(3);
  811                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
  812         } else {
  813                 dmsg = "half";
  814                 XL_SEL_WIN(3);
  815                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
  816                         (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
  817         }
  818 
  819         if (IFM_SUBTYPE(media) == IFM_10_2)
  820                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
  821         else
  822                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
  823 
  824         CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
  825         XL_SEL_WIN(4);
  826         CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
  827 
  828         DELAY(800);
  829         XL_SEL_WIN(7);
  830 
  831         device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg);
  832 }
  833 
  834 static void
  835 xl_reset(struct xl_softc *sc)
  836 {
  837         register int            i;
  838 
  839         XL_LOCK_ASSERT(sc);
  840 
  841         XL_SEL_WIN(0);
  842         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
  843             ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
  844              XL_RESETOPT_DISADVFD:0));
  845 
  846         /*
  847          * If we're using memory mapped register mode, pause briefly
  848          * after issuing the reset command before trying to access any
  849          * other registers. With my 3c575C CardBus card, failing to do
  850          * this results in the system locking up while trying to poll
  851          * the command busy bit in the status register.
  852          */
  853         if (sc->xl_flags & XL_FLAG_USE_MMIO)
  854                 DELAY(100000);
  855 
  856         for (i = 0; i < XL_TIMEOUT; i++) {
  857                 DELAY(10);
  858                 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
  859                         break;
  860         }
  861 
  862         if (i == XL_TIMEOUT)
  863                 device_printf(sc->xl_dev, "reset didn't complete\n");
  864 
  865         /* Reset TX and RX. */
  866         /* Note: the RX reset takes an absurd amount of time
  867          * on newer versions of the Tornado chips such as those
  868          * on the 3c905CX and newer 3c908C cards. We wait an
  869          * extra amount of time so that xl_wait() doesn't complain
  870          * and annoy the users.
  871          */
  872         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
  873         DELAY(100000);
  874         xl_wait(sc);
  875         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
  876         xl_wait(sc);
  877 
  878         if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
  879             sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
  880                 XL_SEL_WIN(2);
  881                 CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS,
  882                     CSR_READ_2(sc, XL_W2_RESET_OPTIONS) |
  883                     ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ?
  884                     XL_RESETOPT_INVERT_LED : 0) |
  885                     ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ?
  886                     XL_RESETOPT_INVERT_MII : 0));
  887         }
  888 
  889         /* Wait a little while for the chip to get its brains in order. */
  890         DELAY(100000);
  891 }
  892 
  893 /*
  894  * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
  895  * IDs against our list and return a device name if we find a match.
  896  */
  897 static int
  898 xl_probe(device_t dev)
  899 {
  900         const struct xl_type    *t;
  901 
  902         t = xl_devs;
  903 
  904         while (t->xl_name != NULL) {
  905                 if ((pci_get_vendor(dev) == t->xl_vid) &&
  906                     (pci_get_device(dev) == t->xl_did)) {
  907                         device_set_desc(dev, t->xl_name);
  908                         return (BUS_PROBE_DEFAULT);
  909                 }
  910                 t++;
  911         }
  912 
  913         return (ENXIO);
  914 }
  915 
  916 /*
  917  * This routine is a kludge to work around possible hardware faults
  918  * or manufacturing defects that can cause the media options register
  919  * (or reset options register, as it's called for the first generation
  920  * 3c90x adapters) to return an incorrect result. I have encountered
  921  * one Dell Latitude laptop docking station with an integrated 3c905-TX
  922  * which doesn't have any of the 'mediaopt' bits set. This screws up
  923  * the attach routine pretty badly because it doesn't know what media
  924  * to look for. If we find ourselves in this predicament, this routine
  925  * will try to guess the media options values and warn the user of a
  926  * possible manufacturing defect with his adapter/system/whatever.
  927  */
  928 static void
  929 xl_mediacheck(struct xl_softc *sc)
  930 {
  931 
  932         /*
  933          * If some of the media options bits are set, assume they are
  934          * correct. If not, try to figure it out down below.
  935          * XXX I should check for 10baseFL, but I don't have an adapter
  936          * to test with.
  937          */
  938         if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
  939                 /*
  940                  * Check the XCVR value. If it's not in the normal range
  941                  * of values, we need to fake it up here.
  942                  */
  943                 if (sc->xl_xcvr <= XL_XCVR_AUTO)
  944                         return;
  945                 else {
  946                         device_printf(sc->xl_dev,
  947                             "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr);
  948                         device_printf(sc->xl_dev,
  949                             "choosing new default based on card type\n");
  950                 }
  951         } else {
  952                 if (sc->xl_type == XL_TYPE_905B &&
  953                     sc->xl_media & XL_MEDIAOPT_10FL)
  954                         return;
  955                 device_printf(sc->xl_dev,
  956 "WARNING: no media options bits set in the media options register!!\n");
  957                 device_printf(sc->xl_dev,
  958 "this could be a manufacturing defect in your adapter or system\n");
  959                 device_printf(sc->xl_dev,
  960 "attempting to guess media type; you should probably consult your vendor\n");
  961         }
  962 
  963         xl_choose_xcvr(sc, 1);
  964 }
  965 
  966 static void
  967 xl_choose_xcvr(struct xl_softc *sc, int verbose)
  968 {
  969         u_int16_t               devid;
  970 
  971         /*
  972          * Read the device ID from the EEPROM.
  973          * This is what's loaded into the PCI device ID register, so it has
   974  * to be correct or we wouldn't have gotten this far.
  975          */
  976         xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
  977 
  978         switch (devid) {
  979         case TC_DEVICEID_BOOMERANG_10BT:        /* 3c900-TPO */
  980         case TC_DEVICEID_KRAKATOA_10BT:         /* 3c900B-TPO */
  981                 sc->xl_media = XL_MEDIAOPT_BT;
  982                 sc->xl_xcvr = XL_XCVR_10BT;
  983                 if (verbose)
  984                         device_printf(sc->xl_dev,
  985                             "guessing 10BaseT transceiver\n");
  986                 break;
  987         case TC_DEVICEID_BOOMERANG_10BT_COMBO:  /* 3c900-COMBO */
  988         case TC_DEVICEID_KRAKATOA_10BT_COMBO:   /* 3c900B-COMBO */
  989                 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
  990                 sc->xl_xcvr = XL_XCVR_10BT;
  991                 if (verbose)
  992                         device_printf(sc->xl_dev,
  993                             "guessing COMBO (AUI/BNC/TP)\n");
  994                 break;
  995         case TC_DEVICEID_KRAKATOA_10BT_TPC:     /* 3c900B-TPC */
  996                 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
  997                 sc->xl_xcvr = XL_XCVR_10BT;
  998                 if (verbose)
  999                         device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n");
 1000                 break;
 1001         case TC_DEVICEID_CYCLONE_10FL:          /* 3c900B-FL */
 1002                 sc->xl_media = XL_MEDIAOPT_10FL;
 1003                 sc->xl_xcvr = XL_XCVR_AUI;
 1004                 if (verbose)
 1005                         device_printf(sc->xl_dev, "guessing 10baseFL\n");
 1006                 break;
 1007         case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
 1008         case TC_DEVICEID_HURRICANE_555:         /* 3c555 */
 1009         case TC_DEVICEID_HURRICANE_556:         /* 3c556 */
 1010         case TC_DEVICEID_HURRICANE_556B:        /* 3c556B */
 1011         case TC_DEVICEID_HURRICANE_575A:        /* 3c575TX */
 1012         case TC_DEVICEID_HURRICANE_575B:        /* 3c575B */
 1013         case TC_DEVICEID_HURRICANE_575C:        /* 3c575C */
 1014         case TC_DEVICEID_HURRICANE_656:         /* 3c656 */
 1015         case TC_DEVICEID_HURRICANE_656B:        /* 3c656B */
 1016         case TC_DEVICEID_TORNADO_656C:          /* 3c656C */
 1017         case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
 1018         case TC_DEVICEID_TORNADO_10_100BT_920B_WNM:     /* 3c920B-EMB-WNM */
 1019                 sc->xl_media = XL_MEDIAOPT_MII;
 1020                 sc->xl_xcvr = XL_XCVR_MII;
 1021                 if (verbose)
 1022                         device_printf(sc->xl_dev, "guessing MII\n");
 1023                 break;
 1024         case TC_DEVICEID_BOOMERANG_100BT4:      /* 3c905-T4 */
 1025         case TC_DEVICEID_CYCLONE_10_100BT4:     /* 3c905B-T4 */
 1026                 sc->xl_media = XL_MEDIAOPT_BT4;
 1027                 sc->xl_xcvr = XL_XCVR_MII;
 1028                 if (verbose)
 1029                         device_printf(sc->xl_dev, "guessing 100baseT4/MII\n");
 1030                 break;
 1031         case TC_DEVICEID_HURRICANE_10_100BT:    /* 3c905B-TX */
  1032         case TC_DEVICEID_HURRICANE_10_100BT_SERV: /* 3c980-TX */
 1033         case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */
 1034         case TC_DEVICEID_HURRICANE_SOHO100TX:   /* 3cSOHO100-TX */
 1035         case TC_DEVICEID_TORNADO_10_100BT:      /* 3c905C-TX */
 1036         case TC_DEVICEID_TORNADO_HOMECONNECT:   /* 3c450-TX */
 1037                 sc->xl_media = XL_MEDIAOPT_BTX;
 1038                 sc->xl_xcvr = XL_XCVR_AUTO;
 1039                 if (verbose)
 1040                         device_printf(sc->xl_dev, "guessing 10/100 internal\n");
 1041                 break;
 1042         case TC_DEVICEID_CYCLONE_10_100_COMBO:  /* 3c905B-COMBO */
 1043                 sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
 1044                 sc->xl_xcvr = XL_XCVR_AUTO;
 1045                 if (verbose)
 1046                         device_printf(sc->xl_dev,
 1047                             "guessing 10/100 plus BNC/AUI\n");
 1048                 break;
 1049         default:
 1050                 device_printf(sc->xl_dev,
 1051                     "unknown device ID: %x -- defaulting to 10baseT\n", devid);
 1052                 sc->xl_media = XL_MEDIAOPT_BT;
 1053                 break;
 1054         }
 1055 }
 1056 
 1057 /*
 1058  * Attach the interface. Allocate softc structures, do ifmedia
 1059  * setup and ethernet/BPF attach.
 1060  */
 1061 static int
 1062 xl_attach(device_t dev)
 1063 {
 1064         u_char                  eaddr[ETHER_ADDR_LEN];
 1065         u_int16_t               sinfo2, xcvr[2];
 1066         struct xl_softc         *sc;
 1067         struct ifnet            *ifp;
 1068         int                     media, pmcap;
 1069         int                     error = 0, phy, rid, res, unit;
 1070         uint16_t                did;
 1071 
 1072         sc = device_get_softc(dev);
 1073         sc->xl_dev = dev;
 1074 
 1075         unit = device_get_unit(dev);
 1076 
 1077         mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1078             MTX_DEF);
 1079         ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
 1080 
 1081         did = pci_get_device(dev);
 1082 
 1083         sc->xl_flags = 0;
 1084         if (did == TC_DEVICEID_HURRICANE_555)
 1085                 sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
 1086         if (did == TC_DEVICEID_HURRICANE_556 ||
 1087             did == TC_DEVICEID_HURRICANE_556B)
 1088                 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
 1089                     XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
 1090                     XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
 1091         if (did == TC_DEVICEID_HURRICANE_555 ||
 1092             did == TC_DEVICEID_HURRICANE_556)
 1093                 sc->xl_flags |= XL_FLAG_8BITROM;
 1094         if (did == TC_DEVICEID_HURRICANE_556B)
 1095                 sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
 1096 
 1097         if (did == TC_DEVICEID_HURRICANE_575B ||
 1098             did == TC_DEVICEID_HURRICANE_575C ||
 1099             did == TC_DEVICEID_HURRICANE_656B ||
 1100             did == TC_DEVICEID_TORNADO_656C)
 1101                 sc->xl_flags |= XL_FLAG_FUNCREG;
 1102         if (did == TC_DEVICEID_HURRICANE_575A ||
 1103             did == TC_DEVICEID_HURRICANE_575B ||
 1104             did == TC_DEVICEID_HURRICANE_575C ||
 1105             did == TC_DEVICEID_HURRICANE_656B ||
 1106             did == TC_DEVICEID_TORNADO_656C)
 1107                 sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 |
 1108                   XL_FLAG_8BITROM;
 1109         if (did == TC_DEVICEID_HURRICANE_656)
 1110                 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
 1111         if (did == TC_DEVICEID_HURRICANE_575B)
 1112                 sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
 1113         if (did == TC_DEVICEID_HURRICANE_575C)
 1114                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
 1115         if (did == TC_DEVICEID_TORNADO_656C)
 1116                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
 1117         if (did == TC_DEVICEID_HURRICANE_656 ||
 1118             did == TC_DEVICEID_HURRICANE_656B)
 1119                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
 1120                     XL_FLAG_INVERT_LED_PWR;
 1121         if (did == TC_DEVICEID_TORNADO_10_100BT_920B ||
 1122             did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM)
 1123                 sc->xl_flags |= XL_FLAG_PHYOK;
 1124 
 1125         switch (did) {
 1126         case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
 1127         case TC_DEVICEID_HURRICANE_575A:
 1128         case TC_DEVICEID_HURRICANE_575B:
 1129         case TC_DEVICEID_HURRICANE_575C:
 1130                 sc->xl_flags |= XL_FLAG_NO_MMIO;
 1131                 break;
 1132         default:
 1133                 break;
 1134         }
 1135 
 1136         /*
 1137          * Map control/status registers.
 1138          */
 1139         pci_enable_busmaster(dev);
 1140 
 1141         if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
 1142                 rid = XL_PCI_LOMEM;
 1143                 res = SYS_RES_MEMORY;
 1144 
 1145                 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
 1146         }
 1147 
 1148         if (sc->xl_res != NULL) {
 1149                 sc->xl_flags |= XL_FLAG_USE_MMIO;
 1150                 if (bootverbose)
 1151                         device_printf(dev, "using memory mapped I/O\n");
 1152         } else {
 1153                 rid = XL_PCI_LOIO;
 1154                 res = SYS_RES_IOPORT;
 1155                 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
 1156                 if (sc->xl_res == NULL) {
 1157                         device_printf(dev, "couldn't map ports/memory\n");
 1158                         error = ENXIO;
 1159                         goto fail;
 1160                 }
 1161                 if (bootverbose)
 1162                         device_printf(dev, "using port I/O\n");
 1163         }
 1164 
 1165         sc->xl_btag = rman_get_bustag(sc->xl_res);
 1166         sc->xl_bhandle = rman_get_bushandle(sc->xl_res);
 1167 
 1168         if (sc->xl_flags & XL_FLAG_FUNCREG) {
 1169                 rid = XL_PCI_FUNCMEM;
 1170                 sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 1171                     RF_ACTIVE);
 1172 
 1173                 if (sc->xl_fres == NULL) {
 1174                         device_printf(dev, "couldn't map funcreg memory\n");
 1175                         error = ENXIO;
 1176                         goto fail;
 1177                 }
 1178 
 1179                 sc->xl_ftag = rman_get_bustag(sc->xl_fres);
 1180                 sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
 1181         }
 1182 
 1183         /* Allocate interrupt */
 1184         rid = 0;
 1185         sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 1186             RF_SHAREABLE | RF_ACTIVE);
 1187         if (sc->xl_irq == NULL) {
 1188                 device_printf(dev, "couldn't map interrupt\n");
 1189                 error = ENXIO;
 1190                 goto fail;
 1191         }
 1192 
 1193         /* Initialize interface name. */
 1194         ifp = sc->xl_ifp = if_alloc(IFT_ETHER);
 1195         if (ifp == NULL) {
 1196                 device_printf(dev, "can not if_alloc()\n");
 1197                 error = ENOSPC;
 1198                 goto fail;
 1199         }
 1200         ifp->if_softc = sc;
 1201         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1202 
 1203         /* Reset the adapter. */
 1204         XL_LOCK(sc);
 1205         xl_reset(sc);
 1206         XL_UNLOCK(sc);
 1207 
 1208         /*
 1209          * Get station address from the EEPROM.
 1210          */
 1211         if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
 1212                 device_printf(dev, "failed to read station address\n");
 1213                 error = ENXIO;
 1214                 goto fail;
 1215         }
 1216 
 1217         callout_init_mtx(&sc->xl_tick_callout, &sc->xl_mtx, 0);
 1218         TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
 1219 
 1220         /*
 1221          * Now allocate a tag for the DMA descriptor lists and a chunk
 1222          * of DMA-able memory based on the tag.  Also obtain the DMA
 1223          * addresses of the RX and TX ring, which we'll need later.
 1224          * All of our lists are allocated as a contiguous block
 1225          * of memory.
 1226          */
 1227         error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 1228             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1229             XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
 1230             &sc->xl_ldata.xl_rx_tag);
 1231         if (error) {
 1232                 device_printf(dev, "failed to allocate rx dma tag\n");
 1233                 goto fail;
 1234         }
 1235 
 1236         error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
 1237             (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT |
 1238             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_rx_dmamap);
 1239         if (error) {
 1240                 device_printf(dev, "no memory for rx list buffers!\n");
 1241                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1242                 sc->xl_ldata.xl_rx_tag = NULL;
 1243                 goto fail;
 1244         }
 1245 
 1246         error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
 1247             sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
 1248             XL_RX_LIST_SZ, xl_dma_map_addr,
 1249             &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
 1250         if (error) {
 1251                 device_printf(dev, "cannot get dma address of the rx ring!\n");
 1252                 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
 1253                     sc->xl_ldata.xl_rx_dmamap);
 1254                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1255                 sc->xl_ldata.xl_rx_tag = NULL;
 1256                 goto fail;
 1257         }
 1258 
 1259         error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 1260             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1261             XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
 1262             &sc->xl_ldata.xl_tx_tag);
 1263         if (error) {
 1264                 device_printf(dev, "failed to allocate tx dma tag\n");
 1265                 goto fail;
 1266         }
 1267 
 1268         error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
 1269             (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT |
 1270             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_tx_dmamap);
 1271         if (error) {
 1272                 device_printf(dev, "no memory for list buffers!\n");
 1273                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1274                 sc->xl_ldata.xl_tx_tag = NULL;
 1275                 goto fail;
 1276         }
 1277 
 1278         error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
 1279             sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
 1280             XL_TX_LIST_SZ, xl_dma_map_addr,
 1281             &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
 1282         if (error) {
 1283                 device_printf(dev, "cannot get dma address of the tx ring!\n");
 1284                 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
 1285                     sc->xl_ldata.xl_tx_dmamap);
 1286                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1287                 sc->xl_ldata.xl_tx_tag = NULL;
 1288                 goto fail;
 1289         }
 1290 
 1291         /*
 1292          * Allocate a DMA tag for the mapping of mbufs.
 1293          */
 1294         error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 1295             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1296             MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
 1297             NULL, &sc->xl_mtag);
 1298         if (error) {
 1299                 device_printf(dev, "failed to allocate mbuf dma tag\n");
 1300                 goto fail;
 1301         }
 1302 
 1303         /* We need a spare DMA map for the RX ring. */
 1304         error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
 1305         if (error)
 1306                 goto fail;
 1307 
 1308         /*
 1309          * Figure out the card type. 3c905B adapters have the
 1310          * 'supportsNoTxLength' bit set in the capabilities
 1311          * word in the EEPROM.
 1312          * Note: my 3c575C CardBus card lies. It returns a value
 1313          * of 0x1578 for its capabilities word, which is somewhat
 1314          * nonsensical. Another way to distinguish a 3c90x chip
 1315          * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
  1316          * bit. This will only be set for 3c90x boomerang chips.
 1317          */
 1318         xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
 1319         if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
 1320             !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
 1321                 sc->xl_type = XL_TYPE_905B;
 1322         else
 1323                 sc->xl_type = XL_TYPE_90X;
 1324 
 1325         /* Check availability of WOL. */
 1326         if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0 &&
 1327             pci_find_cap(dev, PCIY_PMG, &pmcap) == 0) {
 1328                 sc->xl_pmcap = pmcap;
 1329                 sc->xl_flags |= XL_FLAG_WOL;
 1330                 sinfo2 = 0;
 1331                 xl_read_eeprom(sc, (caddr_t)&sinfo2, XL_EE_SOFTINFO2, 1, 0);
 1332                 if ((sinfo2 & XL_SINFO2_AUX_WOL_CON) == 0 && bootverbose)
 1333                         device_printf(dev,
 1334                             "No auxiliary remote wakeup connector!\n");
 1335         }
 1336 
 1337         /* Set the TX start threshold for best performance. */
 1338         sc->xl_tx_thresh = XL_MIN_FRAMELEN;
 1339 
 1340         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1341         ifp->if_ioctl = xl_ioctl;
 1342         ifp->if_capabilities = IFCAP_VLAN_MTU;
 1343         if (sc->xl_type == XL_TYPE_905B) {
 1344                 ifp->if_hwassist = XL905B_CSUM_FEATURES;
 1345 #ifdef XL905B_TXCSUM_BROKEN
 1346                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1347 #else
 1348                 ifp->if_capabilities |= IFCAP_HWCSUM;
 1349 #endif
 1350         }
 1351         if ((sc->xl_flags & XL_FLAG_WOL) != 0)
 1352                 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
 1353         ifp->if_capenable = ifp->if_capabilities;
 1354 #ifdef DEVICE_POLLING
 1355         ifp->if_capabilities |= IFCAP_POLLING;
 1356 #endif
 1357         ifp->if_start = xl_start;
 1358         ifp->if_init = xl_init;
 1359         IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
 1360         ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1;
 1361         IFQ_SET_READY(&ifp->if_snd);
 1362 
 1363         /*
 1364          * Now we have to see what sort of media we have.
  1365          * This includes probing for an MII interface and a
 1366          * possible PHY.
 1367          */
 1368         XL_SEL_WIN(3);
 1369         sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
 1370         if (bootverbose)
 1371                 device_printf(dev, "media options word: %x\n", sc->xl_media);
 1372 
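               /*
                * The default transceiver selection is stored in the
                * connector field of the internal-config words in the
                * EEPROM; read both 16-bit halves and extract that field.
                */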
 1373         xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
 1374         sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
 1375         sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
 1376         sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
 1377 
 1378         xl_mediacheck(sc);
 1379 
 1380         if (sc->xl_media & XL_MEDIAOPT_MII ||
 1381             sc->xl_media & XL_MEDIAOPT_BTX ||
 1382             sc->xl_media & XL_MEDIAOPT_BT4) {
 1383                 if (bootverbose)
 1384                         device_printf(dev, "found MII/AUTO\n");
 1385                 xl_setcfg(sc);
 1386                 /*
 1387                  * Attach PHYs only at MII address 24 if !XL_FLAG_PHYOK.
 1388                  * This is to guard against problems with certain 3Com ASIC
 1389                  * revisions that incorrectly map the internal transceiver
 1390                  * control registers at all MII addresses.
 1391                  */
 1392                 phy = MII_PHY_ANY;
 1393                 if ((sc->xl_flags & XL_FLAG_PHYOK) == 0)
 1394                         phy = 24;
 1395                 error = mii_attach(dev, &sc->xl_miibus, ifp, xl_ifmedia_upd,
 1396                     xl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
 1397                     sc->xl_type == XL_TYPE_905B ? MIIF_DOPAUSE : 0);
 1398                 if (error != 0) {
 1399                         device_printf(dev, "attaching PHYs failed\n");
 1400                         goto fail;
 1401                 }
 1402                 goto done;
 1403         }
 1404 
 1405         /*
 1406          * Sanity check. If the user has selected "auto" and this isn't
 1407          * a 10/100 card of some kind, we need to force the transceiver
 1408          * type to something sane.
 1409          */
 1410         if (sc->xl_xcvr == XL_XCVR_AUTO)
 1411                 xl_choose_xcvr(sc, bootverbose);
 1412 
 1413         /*
 1414          * Do ifmedia setup.
 1415          */
 1416         if (sc->xl_media & XL_MEDIAOPT_BT) {
 1417                 if (bootverbose)
 1418                         device_printf(dev, "found 10baseT\n");
 1419                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
 1420                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
 1421                 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
 1422                         ifmedia_add(&sc->ifmedia,
 1423                             IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
 1424         }
 1425 
 1426         if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
 1427                 /*
 1428                  * Check for a 10baseFL board in disguise.
 1429                  */
 1430                 if (sc->xl_type == XL_TYPE_905B &&
 1431                     sc->xl_media == XL_MEDIAOPT_10FL) {
 1432                         if (bootverbose)
 1433                                 device_printf(dev, "found 10baseFL\n");
 1434                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
 1435                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
 1436                             0, NULL);
 1437                         if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
 1438                                 ifmedia_add(&sc->ifmedia,
 1439                                     IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
 1440                 } else {
 1441                         if (bootverbose)
 1442                                 device_printf(dev, "found AUI\n");
 1443                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
 1444                 }
 1445         }
 1446 
 1447         if (sc->xl_media & XL_MEDIAOPT_BNC) {
 1448                 if (bootverbose)
 1449                         device_printf(dev, "found BNC\n");
 1450                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
 1451         }
 1452 
 1453         if (sc->xl_media & XL_MEDIAOPT_BFX) {
 1454                 if (bootverbose)
 1455                         device_printf(dev, "found 100baseFX\n");
 1456                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
 1457         }
 1458 
 1459         media = IFM_ETHER|IFM_100_TX|IFM_FDX;
 1460         xl_choose_media(sc, &media);
 1461 
 1462         if (sc->xl_miibus == NULL)
 1463                 ifmedia_set(&sc->ifmedia, media);
 1464 
 1465 done:
 1466         if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
 1467                 XL_SEL_WIN(0);
 1468                 CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
 1469         }
 1470 
 1471         /*
 1472          * Call MI attach routine.
 1473          */
 1474         ether_ifattach(ifp, eaddr);
 1475 
 1476         error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE,
 1477             NULL, xl_intr, sc, &sc->xl_intrhand);
 1478         if (error) {
 1479                 device_printf(dev, "couldn't set up irq\n");
 1480                 ether_ifdetach(ifp);
 1481                 goto fail;
 1482         }
 1483 
 1484 fail:
 1485         if (error)
 1486                 xl_detach(dev);
 1487 
 1488         return (error);
 1489 }
 1490 
 1491 /*
 1492  * Choose a default media.
 1493  * XXX This is a leaf function only called by xl_attach() and
  1494  *     acquires/releases the non-recursive driver mutex to
 1495  *     satisfy lock assertions.
 1496  */
 1497 static void
 1498 xl_choose_media(struct xl_softc *sc, int *media)
 1499 {
 1500 
 1501         XL_LOCK(sc);
 1502 
 1503         switch (sc->xl_xcvr) {
 1504         case XL_XCVR_10BT:
 1505                 *media = IFM_ETHER|IFM_10_T;
 1506                 xl_setmode(sc, *media);
 1507                 break;
 1508         case XL_XCVR_AUI:
 1509                 if (sc->xl_type == XL_TYPE_905B &&
 1510                     sc->xl_media == XL_MEDIAOPT_10FL) {
 1511                         *media = IFM_ETHER|IFM_10_FL;
 1512                         xl_setmode(sc, *media);
 1513                 } else {
 1514                         *media = IFM_ETHER|IFM_10_5;
 1515                         xl_setmode(sc, *media);
 1516                 }
 1517                 break;
 1518         case XL_XCVR_COAX:
 1519                 *media = IFM_ETHER|IFM_10_2;
 1520                 xl_setmode(sc, *media);
 1521                 break;
 1522         case XL_XCVR_AUTO:
 1523         case XL_XCVR_100BTX:
 1524         case XL_XCVR_MII:
 1525                 /* Chosen by miibus */
 1526                 break;
 1527         case XL_XCVR_100BFX:
 1528                 *media = IFM_ETHER|IFM_100_FX;
 1529                 break;
 1530         default:
 1531                 device_printf(sc->xl_dev, "unknown XCVR type: %d\n",
 1532                     sc->xl_xcvr);
 1533                 /*
 1534                  * This will probably be wrong, but it prevents
 1535                  * the ifmedia code from panicking.
 1536                  */
 1537                 *media = IFM_ETHER|IFM_10_T;
 1538                 break;
 1539         }
 1540 
 1541         XL_UNLOCK(sc);
 1542 }
 1543 
 1544 /*
  1545  * Shut down the hardware and free up resources. This can be called any
  1546  * time after the mutex has been initialized. It is called in both
  1547  * the error case in attach and the normal detach case, so it needs
 1548  * to be careful about only freeing resources that have actually been
 1549  * allocated.
 1550  */
 1551 static int
 1552 xl_detach(device_t dev)
 1553 {
 1554         struct xl_softc         *sc;
 1555         struct ifnet            *ifp;
 1556         int                     rid, res;
 1557 
 1558         sc = device_get_softc(dev);
 1559         ifp = sc->xl_ifp;
 1560 
 1561         KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
 1562 
 1563 #ifdef DEVICE_POLLING
 1564         if (ifp && ifp->if_capenable & IFCAP_POLLING)
 1565                 ether_poll_deregister(ifp);
 1566 #endif
 1567 
 1568         if (sc->xl_flags & XL_FLAG_USE_MMIO) {
 1569                 rid = XL_PCI_LOMEM;
 1570                 res = SYS_RES_MEMORY;
 1571         } else {
 1572                 rid = XL_PCI_LOIO;
 1573                 res = SYS_RES_IOPORT;
 1574         }
 1575 
 1576         /* These should only be active if attach succeeded */
 1577         if (device_is_attached(dev)) {
 1578                 XL_LOCK(sc);
 1579                 xl_stop(sc);
 1580                 XL_UNLOCK(sc);
 1581                 taskqueue_drain(taskqueue_swi, &sc->xl_task);
 1582                 callout_drain(&sc->xl_tick_callout);
 1583                 ether_ifdetach(ifp);
 1584         }
 1585         if (sc->xl_miibus)
 1586                 device_delete_child(dev, sc->xl_miibus);
 1587         bus_generic_detach(dev);
 1588         ifmedia_removeall(&sc->ifmedia);
 1589 
 1590         if (sc->xl_intrhand)
 1591                 bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
 1592         if (sc->xl_irq)
 1593                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
 1594         if (sc->xl_fres != NULL)
 1595                 bus_release_resource(dev, SYS_RES_MEMORY,
 1596                     XL_PCI_FUNCMEM, sc->xl_fres);
 1597         if (sc->xl_res)
 1598                 bus_release_resource(dev, res, rid, sc->xl_res);
 1599 
 1600         if (ifp)
 1601                 if_free(ifp);
 1602 
 1603         if (sc->xl_mtag) {
 1604                 bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
 1605                 bus_dma_tag_destroy(sc->xl_mtag);
 1606         }
 1607         if (sc->xl_ldata.xl_rx_tag) {
 1608                 bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
 1609                     sc->xl_ldata.xl_rx_dmamap);
 1610                 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
 1611                     sc->xl_ldata.xl_rx_dmamap);
 1612                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1613         }
 1614         if (sc->xl_ldata.xl_tx_tag) {
 1615                 bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
 1616                     sc->xl_ldata.xl_tx_dmamap);
 1617                 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
 1618                     sc->xl_ldata.xl_tx_dmamap);
 1619                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1620         }
 1621 
 1622         mtx_destroy(&sc->xl_mtx);
 1623 
 1624         return (0);
 1625 }
 1626 
 1627 /*
 1628  * Initialize the transmit descriptors.
 1629  */
 1630 static int
 1631 xl_list_tx_init(struct xl_softc *sc)
 1632 {
 1633         struct xl_chain_data    *cd;
 1634         struct xl_list_data     *ld;
 1635         int                     error, i;
 1636 
 1637         XL_LOCK_ASSERT(sc);
 1638 
 1639         cd = &sc->xl_cdata;
 1640         ld = &sc->xl_ldata;
 1641         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 1642                 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
 1643                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1644                     &cd->xl_tx_chain[i].xl_map);
 1645                 if (error)
 1646                         return (error);
 1647                 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
 1648                     i * sizeof(struct xl_list);
 1649                 if (i == (XL_TX_LIST_CNT - 1))
 1650                         cd->xl_tx_chain[i].xl_next = NULL;
 1651                 else
 1652                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
 1653         }
 1654 
 1655         cd->xl_tx_free = &cd->xl_tx_chain[0];
 1656         cd->xl_tx_tail = cd->xl_tx_head = NULL;
 1657 
 1658         bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
 1659         return (0);
 1660 }
 1661 
 1662 /*
  1663  * Initialize the transmit descriptors (90xB: fixed circular ring).
 1664  */
 1665 static int
 1666 xl_list_tx_init_90xB(struct xl_softc *sc)
 1667 {
 1668         struct xl_chain_data    *cd;
 1669         struct xl_list_data     *ld;
 1670         int                     error, i;
 1671 
 1672         XL_LOCK_ASSERT(sc);
 1673 
 1674         cd = &sc->xl_cdata;
 1675         ld = &sc->xl_ldata;
 1676         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 1677                 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
 1678                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1679                     &cd->xl_tx_chain[i].xl_map);
 1680                 if (error)
 1681                         return (error);
 1682                 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
 1683                     i * sizeof(struct xl_list);
 1684                 if (i == (XL_TX_LIST_CNT - 1))
 1685                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
 1686                 else
 1687                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
 1688                 if (i == 0)
 1689                         cd->xl_tx_chain[i].xl_prev =
 1690                             &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
 1691                 else
 1692                         cd->xl_tx_chain[i].xl_prev =
 1693                             &cd->xl_tx_chain[i - 1];
 1694         }
 1695 
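               /*
                * Clear the ring and mark the first descriptor empty so the
                * download engine has a valid starting descriptor; the
                * producer and consumer indexes then start at 1.
                */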
 1696         bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
 1697         ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
 1698 
 1699         cd->xl_tx_prod = 1;
 1700         cd->xl_tx_cons = 1;
 1701         cd->xl_tx_cnt = 0;
 1702 
 1703         bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
 1704         return (0);
 1705 }
 1706 
 1707 /*
 1708  * Initialize the RX descriptors and allocate mbufs for them. Note that
 1709  * we arrange the descriptors in a closed ring, so that the last descriptor
 1710  * points back to the first.
 1711  */
 1712 static int
 1713 xl_list_rx_init(struct xl_softc *sc)
 1714 {
 1715         struct xl_chain_data    *cd;
 1716         struct xl_list_data     *ld;
 1717         int                     error, i, next;
 1718         u_int32_t               nextptr;
 1719 
 1720         XL_LOCK_ASSERT(sc);
 1721 
 1722         cd = &sc->xl_cdata;
 1723         ld = &sc->xl_ldata;
 1724 
 1725         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 1726                 cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
 1727                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1728                     &cd->xl_rx_chain[i].xl_map);
 1729                 if (error)
 1730                         return (error);
 1731                 error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
 1732                 if (error)
 1733                         return (error);
 1734                 if (i == (XL_RX_LIST_CNT - 1))
 1735                         next = 0;
 1736                 else
 1737                         next = i + 1;
 1738                 nextptr = ld->xl_rx_dmaaddr +
 1739                     next * sizeof(struct xl_list_onefrag);
 1740                 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
 1741                 ld->xl_rx_list[i].xl_next = htole32(nextptr);
 1742         }
 1743 
 1744         bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1745         cd->xl_rx_head = &cd->xl_rx_chain[0];
 1746 
 1747         return (0);
 1748 }
 1749 
 1750 /*
 1751  * Initialize an RX descriptor and attach an MBUF cluster.
 1752  * If we fail to do so, we need to leave the old mbuf and
 1753  * the old DMA map untouched so that it can be reused.
 1754  */
 1755 static int
 1756 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
 1757 {
 1758         struct mbuf             *m_new = NULL;
 1759         bus_dmamap_t            map;
 1760         bus_dma_segment_t       segs[1];
 1761         int                     error, nseg;
 1762 
 1763         XL_LOCK_ASSERT(sc);
 1764 
 1765         m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 1766         if (m_new == NULL)
 1767                 return (ENOBUFS);
 1768 
 1769         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
 1770 
 1771         /* Force longword alignment for packet payload. */
 1772         m_adj(m_new, ETHER_ALIGN);
 1773 
 1774         error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, sc->xl_tmpmap, m_new,
 1775             segs, &nseg, BUS_DMA_NOWAIT);
 1776         if (error) {
 1777                 m_freem(m_new);
 1778                 device_printf(sc->xl_dev, "can't map mbuf (error %d)\n",
 1779                     error);
 1780                 return (error);
 1781         }
 1782         KASSERT(nseg == 1,
 1783             ("%s: too many DMA segments (%d)", __func__, nseg));
 1784 
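               /*
                * The new mbuf was loaded into the spare map above, so a
                * mapping failure never disturbs the old buffer. Swap the
                * spare map into this descriptor slot and keep the old map
                * as the next spare, avoiding a map create/destroy cycle
                * for every received packet.
                */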
 1785         bus_dmamap_unload(sc->xl_mtag, c->xl_map);
 1786         map = c->xl_map;
 1787         c->xl_map = sc->xl_tmpmap;
 1788         sc->xl_tmpmap = map;
 1789         c->xl_mbuf = m_new;
 1790         c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
 1791         c->xl_ptr->xl_frag.xl_addr = htole32(segs->ds_addr);
 1792         c->xl_ptr->xl_status = 0;
 1793         bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
 1794         return (0);
 1795 }
 1796 
 1797 static int
 1798 xl_rx_resync(struct xl_softc *sc)
 1799 {
 1800         struct xl_chain_onefrag *pos;
 1801         int                     i;
 1802 
 1803         XL_LOCK_ASSERT(sc);
 1804 
 1805         pos = sc->xl_cdata.xl_rx_head;
 1806 
 1807         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 1808                 if (pos->xl_ptr->xl_status)
 1809                         break;
 1810                 pos = pos->xl_next;
 1811         }
 1812 
 1813         if (i == XL_RX_LIST_CNT)
 1814                 return (0);
 1815 
 1816         sc->xl_cdata.xl_rx_head = pos;
 1817 
 1818         return (EAGAIN);
 1819 }
 1820 
 1821 /*
 1822  * A frame has been uploaded: pass the resulting mbuf chain up to
 1823  * the higher level protocols.
 1824  */
 1825 static int
 1826 xl_rxeof(struct xl_softc *sc)
 1827 {
 1828         struct mbuf             *m;
 1829         struct ifnet            *ifp = sc->xl_ifp;
 1830         struct xl_chain_onefrag *cur_rx;
 1831         int                     total_len;
 1832         int                     rx_npkts = 0;
 1833         u_int32_t               rxstat;
 1834 
 1835         XL_LOCK_ASSERT(sc);
 1836 again:
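               /*
                * Pick up the chip's latest writes to the RX ring before
                * inspecting any descriptor status words.
                */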
 1837         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
 1838             BUS_DMASYNC_POSTREAD);
 1839         while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
 1840 #ifdef DEVICE_POLLING
 1841                 if (ifp->if_capenable & IFCAP_POLLING) {
 1842                         if (sc->rxcycles <= 0)
 1843                                 break;
 1844                         sc->rxcycles--;
 1845                 }
 1846 #endif
 1847                 cur_rx = sc->xl_cdata.xl_rx_head;
 1848                 sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
 1849                 total_len = rxstat & XL_RXSTAT_LENMASK;
 1850                 rx_npkts++;
 1851 
 1852                 /*
 1853                  * Since we have told the chip to allow large frames,
 1854                  * we need to trap giant frame errors in software. We allow
 1855                  * a little more than the normal frame size to account for
 1856                  * frames with VLAN tags.
 1857                  */
 1858                 if (total_len > XL_MAX_FRAMELEN)
 1859                         rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
 1860 
 1861                 /*
 1862                  * If an error occurs, update stats, clear the
 1863                  * status word and leave the mbuf cluster in place:
 1864                  * it should simply get re-used next time this descriptor
 1865                  * comes up in the ring.
 1866                  */
 1867                 if (rxstat & XL_RXSTAT_UP_ERROR) {
 1868                         ifp->if_ierrors++;
 1869                         cur_rx->xl_ptr->xl_status = 0;
 1870                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1871                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1872                         continue;
 1873                 }
 1874 
 1875                 /*
 1876                  * If the error bit was not set, the upload complete
  1877          * bit should be set, which means we have a valid packet.
 1878                  * If not, something truly strange has happened.
 1879                  */
 1880                 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
 1881                         device_printf(sc->xl_dev,
 1882                             "bad receive status -- packet dropped\n");
 1883                         ifp->if_ierrors++;
 1884                         cur_rx->xl_ptr->xl_status = 0;
 1885                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1886                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1887                         continue;
 1888                 }
 1889 
 1890                 /* No errors; receive the packet. */
 1891                 bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
 1892                     BUS_DMASYNC_POSTREAD);
 1893                 m = cur_rx->xl_mbuf;
 1894 
 1895                 /*
 1896                  * Try to conjure up a new mbuf cluster. If that
 1897                  * fails, it means we have an out of memory condition and
 1898                  * should leave the buffer in place and continue. This will
 1899                  * result in a lost packet, but there's little else we
 1900                  * can do in this situation.
 1901                  */
 1902                 if (xl_newbuf(sc, cur_rx)) {
 1903                         ifp->if_ierrors++;
 1904                         cur_rx->xl_ptr->xl_status = 0;
 1905                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1906                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1907                         continue;
 1908                 }
 1909                 bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1910                     sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1911 
 1912                 ifp->if_ipackets++;
 1913                 m->m_pkthdr.rcvif = ifp;
 1914                 m->m_pkthdr.len = m->m_len = total_len;
 1915 
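                       /*
                        * Map the chip's checksum status bits onto the mbuf
                        * flags: CSUM_IP_CHECKED/CSUM_IP_VALID cover the IP
                        * header, and a good TCP or UDP checksum is reported
                        * as CSUM_DATA_VALID|CSUM_PSEUDO_HDR with csum_data
                        * set to 0xffff so the stack skips its own check.
                        */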
 1916                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 1917                         /* Do IP checksum checking. */
 1918                         if (rxstat & XL_RXSTAT_IPCKOK)
 1919                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1920                         if (!(rxstat & XL_RXSTAT_IPCKERR))
 1921                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1922                         if ((rxstat & XL_RXSTAT_TCPCOK &&
 1923                              !(rxstat & XL_RXSTAT_TCPCKERR)) ||
 1924                             (rxstat & XL_RXSTAT_UDPCKOK &&
 1925                              !(rxstat & XL_RXSTAT_UDPCKERR))) {
 1926                                 m->m_pkthdr.csum_flags |=
 1927                                         CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
 1928                                 m->m_pkthdr.csum_data = 0xffff;
 1929                         }
 1930                 }
 1931 
 1932                 XL_UNLOCK(sc);
 1933                 (*ifp->if_input)(ifp, m);
 1934                 XL_LOCK(sc);
 1935 
 1936                 /*
 1937                  * If we are running from the taskqueue, the interface
 1938                  * might have been stopped while we were passing the last
 1939                  * packet up the network stack.
 1940                  */
 1941                 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1942                         return (rx_npkts);
 1943         }
 1944 
 1945         /*
 1946          * Handle the 'end of channel' condition. When the upload
 1947          * engine hits the end of the RX ring, it will stall. This
 1948          * is our cue to flush the RX ring, reload the uplist pointer
 1949          * register and unstall the engine.
 1950          * XXX This is actually a little goofy. With the ThunderLAN
 1951          * chip, you get an interrupt when the receiver hits the end
  1952          * of the receive ring, which tells you exactly when you
  1953          * need to reload the ring pointer. Here we have to
 1954          * fake it. I'm mad at myself for not being clever enough
 1955          * to avoid the use of a goto here.
 1956          */
 1957         if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
 1958                 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
 1959                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 1960                 xl_wait(sc);
 1961                 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
 1962                 sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
 1963                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 1964                 goto again;
 1965         }
 1966         return (rx_npkts);
 1967 }
 1968 
 1969 /*
 1970  * Taskqueue wrapper for xl_rxeof().
 1971  */
 1972 static void
 1973 xl_rxeof_task(void *arg, int pending)
 1974 {
 1975         struct xl_softc *sc = (struct xl_softc *)arg;
 1976 
 1977         XL_LOCK(sc);
 1978         if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING)
 1979                 xl_rxeof(sc);
 1980         XL_UNLOCK(sc);
 1981 }
 1982 
 1983 /*
 1984  * A frame was downloaded to the chip. It's safe for us to clean up
 1985  * the list buffers.
 1986  */
 1987 static void
 1988 xl_txeof(struct xl_softc *sc)
 1989 {
 1990         struct xl_chain         *cur_tx;
 1991         struct ifnet            *ifp = sc->xl_ifp;
 1992 
 1993         XL_LOCK_ASSERT(sc);
 1994 
 1995         /*
 1996          * Go through our tx list and free mbufs for those
  1997          * frames that have been downloaded. Note: the 3c905B
 1998          * sets a special bit in the status word to let us
 1999          * know that a frame has been downloaded, but the
 2000          * original 3c900/3c905 adapters don't do that.
 2001          * Consequently, we have to use a different test if
 2002          * xl_type != XL_TYPE_905B.
 2003          */
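               /*
                * For those older chips the test below is simply whether the
                * download list pointer is still nonzero: if it is, the chip
                * has not finished with the frames we handed it yet.
                */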
 2004         while (sc->xl_cdata.xl_tx_head != NULL) {
 2005                 cur_tx = sc->xl_cdata.xl_tx_head;
 2006 
 2007                 if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
 2008                         break;
 2009 
 2010                 sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
 2011                 bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
 2012                     BUS_DMASYNC_POSTWRITE);
 2013                 bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
 2014                 m_freem(cur_tx->xl_mbuf);
 2015                 cur_tx->xl_mbuf = NULL;
 2016                 ifp->if_opackets++;
 2017                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2018 
 2019                 cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
 2020                 sc->xl_cdata.xl_tx_free = cur_tx;
 2021         }
 2022 
 2023         if (sc->xl_cdata.xl_tx_head == NULL) {
 2024                 sc->xl_wdog_timer = 0;
 2025                 sc->xl_cdata.xl_tx_tail = NULL;
 2026         } else {
 2027                 if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
 2028                         !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
 2029                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2030                                 sc->xl_cdata.xl_tx_head->xl_phys);
 2031                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2032                 }
 2033         }
 2034 }
 2035 
 2036 static void
 2037 xl_txeof_90xB(struct xl_softc *sc)
 2038 {
 2039         struct xl_chain         *cur_tx = NULL;
 2040         struct ifnet            *ifp = sc->xl_ifp;
 2041         int                     idx;
 2042 
 2043         XL_LOCK_ASSERT(sc);
 2044 
 2045         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2046             BUS_DMASYNC_POSTREAD);
 2047         idx = sc->xl_cdata.xl_tx_cons;
 2048         while (idx != sc->xl_cdata.xl_tx_prod) {
 2049                 cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2050 
 2051                 if (!(le32toh(cur_tx->xl_ptr->xl_status) &
 2052                       XL_TXSTAT_DL_COMPLETE))
 2053                         break;
 2054 
 2055                 if (cur_tx->xl_mbuf != NULL) {
 2056                         bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
 2057                             BUS_DMASYNC_POSTWRITE);
 2058                         bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
 2059                         m_freem(cur_tx->xl_mbuf);
 2060                         cur_tx->xl_mbuf = NULL;
 2061                 }
 2062 
 2063                 ifp->if_opackets++;
 2064 
 2065                 sc->xl_cdata.xl_tx_cnt--;
 2066                 XL_INC(idx, XL_TX_LIST_CNT);
 2067         }
 2068 
 2069         if (sc->xl_cdata.xl_tx_cnt == 0)
 2070                 sc->xl_wdog_timer = 0;
 2071         sc->xl_cdata.xl_tx_cons = idx;
 2072 
 2073         if (cur_tx != NULL)
 2074                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2075 }
 2076 
 2077 /*
 2078  * TX 'end of channel' interrupt handler. Actually, we should
 2079  * only get a 'TX complete' interrupt if there's a transmit error,
 2080  * so this is really TX error handler.
 2081  */
 2082 static void
 2083 xl_txeoc(struct xl_softc *sc)
 2084 {
 2085         u_int8_t                txstat;
 2086 
 2087         XL_LOCK_ASSERT(sc);
 2088 
 2089         while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
 2090                 if (txstat & XL_TXSTATUS_UNDERRUN ||
 2091                         txstat & XL_TXSTATUS_JABBER ||
 2092                         txstat & XL_TXSTATUS_RECLAIM) {
 2093                         device_printf(sc->xl_dev,
 2094                             "transmission error: 0x%02x\n", txstat);
 2095                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2096                         xl_wait(sc);
 2097                         if (sc->xl_type == XL_TYPE_905B) {
 2098                                 if (sc->xl_cdata.xl_tx_cnt) {
 2099                                         int                     i;
 2100                                         struct xl_chain         *c;
 2101 
 2102                                         i = sc->xl_cdata.xl_tx_cons;
 2103                                         c = &sc->xl_cdata.xl_tx_chain[i];
 2104                                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2105                                             c->xl_phys);
 2106                                         CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
 2107                                         sc->xl_wdog_timer = 5;
 2108                                 }
 2109                         } else {
 2110                                 if (sc->xl_cdata.xl_tx_head != NULL) {
 2111                                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2112                                             sc->xl_cdata.xl_tx_head->xl_phys);
 2113                                         sc->xl_wdog_timer = 5;
 2114                                 }
 2115                         }
 2116                         /*
 2117                          * Remember to set this for the
 2118                          * first generation 3c90X chips.
 2119                          */
 2120                         CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
 2121                         if (txstat & XL_TXSTATUS_UNDERRUN &&
 2122                             sc->xl_tx_thresh < XL_PACKET_SIZE) {
 2123                                 sc->xl_tx_thresh += XL_MIN_FRAMELEN;
 2124                                 device_printf(sc->xl_dev,
 2125 "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh);
 2126                         }
 2127                         CSR_WRITE_2(sc, XL_COMMAND,
 2128                             XL_CMD_TX_SET_START|sc->xl_tx_thresh);
 2129                         if (sc->xl_type == XL_TYPE_905B) {
 2130                                 CSR_WRITE_2(sc, XL_COMMAND,
 2131                                 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
 2132                         }
 2133                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2134                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2135                 } else {
 2136                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2137                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2138                 }
 2139                 /*
 2140                  * Write an arbitrary byte to the TX_STATUS register
 2141                  * to clear this interrupt/error and advance to the next.
 2142                  */
 2143                 CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
 2144         }
 2145 }
 2146 
 2147 static void
 2148 xl_intr(void *arg)
 2149 {
 2150         struct xl_softc         *sc = arg;
 2151         struct ifnet            *ifp = sc->xl_ifp;
 2152         u_int16_t               status;
 2153 
 2154         XL_LOCK(sc);
 2155 
 2156 #ifdef DEVICE_POLLING
 2157         if (ifp->if_capenable & IFCAP_POLLING) {
 2158                 XL_UNLOCK(sc);
 2159                 return;
 2160         }
 2161 #endif
 2162 
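               /*
                * Service interrupt causes until the status register shows
                * none that we care about, acknowledging each batch with
                * XL_CMD_INTR_ACK. A status of 0xFFFF means reads are
                * returning all ones, i.e. the hardware has probably gone
                * away, so give up in that case as well.
                */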
 2163         for (;;) {
 2164                 status = CSR_READ_2(sc, XL_STATUS);
 2165                 if ((status & XL_INTRS) == 0 || status == 0xFFFF)
 2166                         break;
 2167                 CSR_WRITE_2(sc, XL_COMMAND,
 2168                     XL_CMD_INTR_ACK|(status & XL_INTRS));
 2169                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 2170                         break;
 2171 
 2172                 if (status & XL_STAT_UP_COMPLETE) {
 2173                         if (xl_rxeof(sc) == 0) {
 2174                                 while (xl_rx_resync(sc))
 2175                                         xl_rxeof(sc);
 2176                         }
 2177                 }
 2178 
 2179                 if (status & XL_STAT_DOWN_COMPLETE) {
 2180                         if (sc->xl_type == XL_TYPE_905B)
 2181                                 xl_txeof_90xB(sc);
 2182                         else
 2183                                 xl_txeof(sc);
 2184                 }
 2185 
 2186                 if (status & XL_STAT_TX_COMPLETE) {
 2187                         ifp->if_oerrors++;
 2188                         xl_txeoc(sc);
 2189                 }
 2190 
 2191                 if (status & XL_STAT_ADFAIL) {
 2192                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2193                         xl_init_locked(sc);
 2194                         break;
 2195                 }
 2196 
 2197                 if (status & XL_STAT_STATSOFLOW)
 2198                         xl_stats_update(sc);
 2199         }
 2200 
 2201         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2202             ifp->if_drv_flags & IFF_DRV_RUNNING) {
 2203                 if (sc->xl_type == XL_TYPE_905B)
 2204                         xl_start_90xB_locked(ifp);
 2205                 else
 2206                         xl_start_locked(ifp);
 2207         }
 2208 
 2209         XL_UNLOCK(sc);
 2210 }
 2211 
 2212 #ifdef DEVICE_POLLING
 2213 static int
 2214 xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2215 {
 2216         struct xl_softc *sc = ifp->if_softc;
 2217         int rx_npkts = 0;
 2218 
 2219         XL_LOCK(sc);
 2220         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2221                 rx_npkts = xl_poll_locked(ifp, cmd, count);
 2222         XL_UNLOCK(sc);
 2223         return (rx_npkts);
 2224 }
 2225 
 2226 static int
 2227 xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2228 {
 2229         struct xl_softc *sc = ifp->if_softc;
 2230         int rx_npkts;
 2231 
 2232         XL_LOCK_ASSERT(sc);
 2233 
 2234         sc->rxcycles = count;
 2235         rx_npkts = xl_rxeof(sc);
 2236         if (sc->xl_type == XL_TYPE_905B)
 2237                 xl_txeof_90xB(sc);
 2238         else
 2239                 xl_txeof(sc);
 2240 
 2241         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 2242                 if (sc->xl_type == XL_TYPE_905B)
 2243                         xl_start_90xB_locked(ifp);
 2244                 else
 2245                         xl_start_locked(ifp);
 2246         }
 2247 
 2248         if (cmd == POLL_AND_CHECK_STATUS) {
 2249                 u_int16_t status;
 2250 
 2251                 status = CSR_READ_2(sc, XL_STATUS);
 2252                 if (status & XL_INTRS && status != 0xFFFF) {
 2253                         CSR_WRITE_2(sc, XL_COMMAND,
 2254                             XL_CMD_INTR_ACK|(status & XL_INTRS));
 2255 
 2256                         if (status & XL_STAT_TX_COMPLETE) {
 2257                                 ifp->if_oerrors++;
 2258                                 xl_txeoc(sc);
 2259                         }
 2260 
 2261                         if (status & XL_STAT_ADFAIL) {
 2262                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2263                                 xl_init_locked(sc);
 2264                         }
 2265 
 2266                         if (status & XL_STAT_STATSOFLOW)
 2267                                 xl_stats_update(sc);
 2268                 }
 2269         }
 2270         return (rx_npkts);
 2271 }
 2272 #endif /* DEVICE_POLLING */
 2273 
 2274 static void
 2275 xl_tick(void *xsc)
 2276 {
 2277         struct xl_softc *sc = xsc;
 2278         struct mii_data *mii;
 2279 
 2280         XL_LOCK_ASSERT(sc);
 2281 
 2282         if (sc->xl_miibus != NULL) {
 2283                 mii = device_get_softc(sc->xl_miibus);
 2284                 mii_tick(mii);
 2285         }
 2286 
 2287         xl_stats_update(sc);
 2288         if (xl_watchdog(sc) == EJUSTRETURN)
 2289                 return;
 2290 
 2291         callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc);
 2292 }
 2293 
 2294 static void
 2295 xl_stats_update(struct xl_softc *sc)
 2296 {
 2297         struct ifnet            *ifp = sc->xl_ifp;
 2298         struct xl_stats         xl_stats;
 2299         u_int8_t                *p;
 2300         int                     i;
 2301 
 2302         XL_LOCK_ASSERT(sc);
 2303 
 2304         bzero((char *)&xl_stats, sizeof(struct xl_stats));
 2305 
 2306         p = (u_int8_t *)&xl_stats;
 2307 
 2308         /* Read all the stats registers. */
 2309         XL_SEL_WIN(6);
 2310 
 2311         for (i = 0; i < 16; i++)
 2312                 *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
 2313 
 2314         ifp->if_ierrors += xl_stats.xl_rx_overrun;
 2315 
 2316         ifp->if_collisions += xl_stats.xl_tx_multi_collision +
 2317             xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision;
 2318 
 2319         /*
 2320          * Boomerang and cyclone chips have an extra stats counter
 2321          * in window 4 (BadSSD). We have to read this too in order
 2322          * to clear out all the stats registers and avoid a statsoflow
 2323          * interrupt.
 2324          */
 2325         XL_SEL_WIN(4);
 2326         CSR_READ_1(sc, XL_W4_BADSSD);
 2327         XL_SEL_WIN(7);
 2328 }
 2329 
 2330 /*
 2331  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 2332  * pointers to the fragment pointers.
 2333  */
 2334 static int
 2335 xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf **m_head)
 2336 {
 2337         struct mbuf             *m_new;
 2338         struct ifnet            *ifp = sc->xl_ifp;
 2339         int                     error, i, nseg, total_len;
 2340         u_int32_t               status;
 2341 
 2342         XL_LOCK_ASSERT(sc);
 2343 
 2344         error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head,
 2345             sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
 2346 
 2347         if (error && error != EFBIG) {
 2348                 if_printf(ifp, "can't map mbuf (error %d)\n", error);
 2349                 return (error);
 2350         }
 2351 
 2352         /*
 2353          * Handle special case: we used up all 63 fragments,
  2354          * but we have more mbufs left in the chain. Collapse the
  2355          * chain into fewer mbufs. Note that we don't
 2356          * bother clearing the values in the other fragment
 2357          * pointers/counters; it wouldn't gain us anything,
 2358          * and would waste cycles.
 2359          */
 2360         if (error) {
 2361                 m_new = m_collapse(*m_head, M_NOWAIT, XL_MAXFRAGS);
 2362                 if (m_new == NULL) {
 2363                         m_freem(*m_head);
 2364                         *m_head = NULL;
 2365                         return (ENOBUFS);
 2366                 }
 2367                 *m_head = m_new;
 2368 
 2369                 error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map,
 2370                     *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
 2371                 if (error) {
 2372                         m_freem(*m_head);
 2373                         *m_head = NULL;
 2374                         if_printf(ifp, "can't map mbuf (error %d)\n", error);
 2375                         return (error);
 2376                 }
 2377         }
 2378 
 2379         KASSERT(nseg <= XL_MAXFRAGS,
 2380             ("%s: too many DMA segments (%d)", __func__, nseg));
 2381         if (nseg == 0) {
 2382                 m_freem(*m_head);
 2383                 *m_head = NULL;
 2384                 return (EIO);
 2385         }
 2386         bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
 2387 
 2388         total_len = 0;
 2389         for (i = 0; i < nseg; i++) {
 2390                 KASSERT(sc->xl_cdata.xl_tx_segs[i].ds_len <= MCLBYTES,
 2391                     ("segment size too large"));
 2392                 c->xl_ptr->xl_frag[i].xl_addr =
 2393                     htole32(sc->xl_cdata.xl_tx_segs[i].ds_addr);
 2394                 c->xl_ptr->xl_frag[i].xl_len =
 2395                     htole32(sc->xl_cdata.xl_tx_segs[i].ds_len);
 2396                 total_len += sc->xl_cdata.xl_tx_segs[i].ds_len;
 2397         }
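               /* Mark the final fragment so the chip knows where the frame ends. */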
 2398         c->xl_ptr->xl_frag[nseg - 1].xl_len |= htole32(XL_LAST_FRAG);
 2399 
 2400         if (sc->xl_type == XL_TYPE_905B) {
 2401                 status = XL_TXSTAT_RND_DEFEAT;
 2402 
 2403 #ifndef XL905B_TXCSUM_BROKEN
 2404                 if ((*m_head)->m_pkthdr.csum_flags) {
 2405                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
 2406                                 status |= XL_TXSTAT_IPCKSUM;
 2407                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
 2408                                 status |= XL_TXSTAT_TCPCKSUM;
 2409                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
 2410                                 status |= XL_TXSTAT_UDPCKSUM;
 2411                 }
 2412 #endif
 2413         } else
 2414                 status = total_len;
 2415         c->xl_ptr->xl_status = htole32(status);
 2416         c->xl_ptr->xl_next = 0;
 2417 
 2418         c->xl_mbuf = *m_head;
 2419         return (0);
 2420 }
 2421 
 2422 /*
 2423  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 2424  * to the mbuf data regions directly in the transmit lists. We also save a
 2425  * copy of the pointers since the transmit list fragment pointers are
 2426  * physical addresses.
 2427  */
 2428 
 2429 static void
 2430 xl_start(struct ifnet *ifp)
 2431 {
 2432         struct xl_softc         *sc = ifp->if_softc;
 2433 
 2434         XL_LOCK(sc);
 2435 
 2436         if (sc->xl_type == XL_TYPE_905B)
 2437                 xl_start_90xB_locked(ifp);
 2438         else
 2439                 xl_start_locked(ifp);
 2440 
 2441         XL_UNLOCK(sc);
 2442 }
 2443 
 2444 static void
 2445 xl_start_locked(struct ifnet *ifp)
 2446 {
 2447         struct xl_softc         *sc = ifp->if_softc;
 2448         struct mbuf             *m_head;
 2449         struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
 2450         struct xl_chain         *prev_tx;
 2451         int                     error;
 2452 
 2453         XL_LOCK_ASSERT(sc);
 2454 
 2455         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2456             IFF_DRV_RUNNING)
 2457                 return;
 2458         /*
 2459          * Check for an available queue slot. If there are none,
 2460          * punt.
 2461          */
 2462         if (sc->xl_cdata.xl_tx_free == NULL) {
 2463                 xl_txeoc(sc);
 2464                 xl_txeof(sc);
 2465                 if (sc->xl_cdata.xl_tx_free == NULL) {
 2466                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2467                         return;
 2468                 }
 2469         }
 2470 
 2471         start_tx = sc->xl_cdata.xl_tx_free;
 2472 
 2473         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2474             sc->xl_cdata.xl_tx_free != NULL;) {
 2475                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2476                 if (m_head == NULL)
 2477                         break;
 2478 
 2479                 /* Pick a descriptor off the free list. */
 2480                 prev_tx = cur_tx;
 2481                 cur_tx = sc->xl_cdata.xl_tx_free;
 2482 
 2483                 /* Pack the data into the descriptor. */
 2484                 error = xl_encap(sc, cur_tx, &m_head);
 2485                 if (error) {
 2486                         cur_tx = prev_tx;
 2487                         if (m_head == NULL)
 2488                                 break;
 2489                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2490                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2491                         break;
 2492                 }
 2493 
 2494                 sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
 2495                 cur_tx->xl_next = NULL;
 2496 
 2497                 /* Chain it together. */
 2498                 if (prev != NULL) {
 2499                         prev->xl_next = cur_tx;
 2500                         prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
 2501                 }
 2502                 prev = cur_tx;
 2503 
 2504                 /*
 2505                  * If there's a BPF listener, bounce a copy of this frame
 2506                  * to him.
 2507                  */
 2508                 BPF_MTAP(ifp, cur_tx->xl_mbuf);
 2509         }
 2510 
 2511         /*
 2512          * If there are no packets queued, bail.
 2513          */
 2514         if (cur_tx == NULL)
 2515                 return;
 2516 
 2517         /*
 2518          * Place the request for the upload interrupt
 2519          * in the last descriptor in the chain. This way, if
 2520          * we're chaining several packets at once, we'll only
 2521          * get an interrupt once for the whole chain rather than
 2522          * once for each packet.
 2523          */
 2524         cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
 2525 
 2526         /*
 2527          * Queue the packets. If the TX channel is clear, update
 2528          * the downlist pointer register.
 2529          */
 2530         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
 2531         xl_wait(sc);
 2532 
 2533         if (sc->xl_cdata.xl_tx_head != NULL) {
 2534                 sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
 2535                 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
 2536                     htole32(start_tx->xl_phys);
 2537                 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
 2538                     htole32(~XL_TXSTAT_DL_INTR);
 2539                 sc->xl_cdata.xl_tx_tail = cur_tx;
 2540         } else {
 2541                 sc->xl_cdata.xl_tx_head = start_tx;
 2542                 sc->xl_cdata.xl_tx_tail = cur_tx;
 2543         }
 2544         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2545             BUS_DMASYNC_PREWRITE);
 2546         if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
 2547                 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);
 2548 
 2549         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2550 
 2551         XL_SEL_WIN(7);
 2552 
 2553         /*
 2554          * Set a timeout in case the chip goes out to lunch.
 2555          */
 2556         sc->xl_wdog_timer = 5;
 2557 
 2558         /*
 2559          * XXX Under certain conditions, usually on slower machines
 2560          * where interrupts may be dropped, it's possible for the
 2561          * adapter to chew up all the buffers in the receive ring
 2562          * and stall, without us being able to do anything about it.
 2563          * To guard against this, we need to make a pass over the
 2564          * RX queue to make sure there aren't any packets pending.
 2565          * Doing it here means we can flush the receive ring at the
 2566          * same time the chip is DMAing the transmit descriptors we
 2567          * just gave it.
 2568          *
 2569          * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
 2570          * nature of their chips in all their marketing literature;
 2571          * we may as well take advantage of it. :)
 2572          */
 2573         taskqueue_enqueue(taskqueue_swi, &sc->xl_task);
 2574 }
 2575 
 2576 static void
 2577 xl_start_90xB_locked(struct ifnet *ifp)
 2578 {
 2579         struct xl_softc         *sc = ifp->if_softc;
 2580         struct mbuf             *m_head;
 2581         struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
 2582         struct xl_chain         *prev_tx;
 2583         int                     error, idx;
 2584 
 2585         XL_LOCK_ASSERT(sc);
 2586 
 2587         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2588             IFF_DRV_RUNNING)
 2589                 return;
 2590 
 2591         idx = sc->xl_cdata.xl_tx_prod;
 2592         start_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2593 
 2594         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2595             sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL;) {
 2596                 if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
 2597                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2598                         break;
 2599                 }
 2600 
 2601                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2602                 if (m_head == NULL)
 2603                         break;
 2604 
 2605                 prev_tx = cur_tx;
 2606                 cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2607 
 2608                 /* Pack the data into the descriptor. */
 2609                 error = xl_encap(sc, cur_tx, &m_head);
 2610                 if (error) {
 2611                         cur_tx = prev_tx;
 2612                         if (m_head == NULL)
 2613                                 break;
 2614                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2615                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2616                         break;
 2617                 }
 2618 
 2619                 /* Chain it together. */
 2620                 if (prev != NULL)
 2621                         prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
 2622                 prev = cur_tx;
 2623 
 2624                 /*
 2625                  * If there's a BPF listener, bounce a copy of this frame
 2626                  * to him.
 2627                  */
 2628                 BPF_MTAP(ifp, cur_tx->xl_mbuf);
 2629 
 2630                 XL_INC(idx, XL_TX_LIST_CNT);
 2631                 sc->xl_cdata.xl_tx_cnt++;
 2632         }
 2633 
 2634         /*
 2635          * If there are no packets queued, bail.
 2636          */
 2637         if (cur_tx == NULL)
 2638                 return;
 2639 
 2640         /*
 2641          * Place the request for the upload interrupt
 2642          * in the last descriptor in the chain. This way, if
 2643          * we're chaining several packets at once, we'll only
 2644          * get an interrupt once for the whole chain rather than
 2645          * once for each packet.
 2646          */
 2647         cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
 2648 
 2649         /* Start transmission */
 2650         sc->xl_cdata.xl_tx_prod = idx;
 2651         start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
 2652         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2653             BUS_DMASYNC_PREWRITE);
 2654 
 2655         /*
 2656          * Set a timeout in case the chip goes out to lunch.
 2657          */
 2658         sc->xl_wdog_timer = 5;
 2659 }
 2660 
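       /*
        * Unlocked wrapper for xl_init_locked(): acquire the driver lock,
        * perform the actual initialization and drop the lock again.
        */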
 2661 static void
 2662 xl_init(void *xsc)
 2663 {
 2664         struct xl_softc         *sc = xsc;
 2665 
 2666         XL_LOCK(sc);
 2667         xl_init_locked(sc);
 2668         XL_UNLOCK(sc);
 2669 }
 2670 
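       /*
        * Bring the interface up: stop and reset the chip, program the
        * station address, set up the RX and TX descriptor rings and the
        * various thresholds, program the RX filter, load the list
        * pointers, enable the receiver and transmitter and finally start
        * the one-second tick callout.  Called with the driver lock held.
        */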
 2671 static void
 2672 xl_init_locked(struct xl_softc *sc)
 2673 {
 2674         struct ifnet            *ifp = sc->xl_ifp;
 2675         int                     error, i;
 2676         struct mii_data         *mii = NULL;
 2677 
 2678         XL_LOCK_ASSERT(sc);
 2679 
 2680         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 2681                 return;
 2682         /*
 2683          * Cancel pending I/O and free all RX/TX buffers.
 2684          */
 2685         xl_stop(sc);
 2686 
 2687         /* Reset the chip to a known state. */
 2688         xl_reset(sc);
 2689 
 2690         if (sc->xl_miibus == NULL) {
 2691                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 2692                 xl_wait(sc);
 2693         }
 2694         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2695         xl_wait(sc);
 2696         DELAY(10000);
 2697 
 2698         if (sc->xl_miibus != NULL)
 2699                 mii = device_get_softc(sc->xl_miibus);
 2700 
 2701         /*
  2702          * Clear the WOL status and disable all WOL features, since WOL
  2703          * would interfere with Rx operation under normal conditions.
 2704          */
 2705         if ((sc->xl_flags & XL_FLAG_WOL) != 0) {
 2706                 XL_SEL_WIN(7);
 2707                 CSR_READ_2(sc, XL_W7_BM_PME);
 2708                 CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
 2709         }
 2710         /* Init our MAC address */
 2711         XL_SEL_WIN(2);
 2712         for (i = 0; i < ETHER_ADDR_LEN; i++) {
 2713                 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
 2714                                 IF_LLADDR(sc->xl_ifp)[i]);
 2715         }
 2716 
 2717         /* Clear the station mask. */
 2718         for (i = 0; i < 3; i++)
 2719                 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
 2720 #ifdef notdef
 2721         /* Reset TX and RX. */
 2722         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 2723         xl_wait(sc);
 2724         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2725         xl_wait(sc);
 2726 #endif
 2727         /* Init circular RX list. */
 2728         error = xl_list_rx_init(sc);
 2729         if (error) {
 2730                 device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n",
 2731                     error);
 2732                 xl_stop(sc);
 2733                 return;
 2734         }
 2735 
 2736         /* Init TX descriptors. */
 2737         if (sc->xl_type == XL_TYPE_905B)
 2738                 error = xl_list_tx_init_90xB(sc);
 2739         else
 2740                 error = xl_list_tx_init(sc);
 2741         if (error) {
 2742                 device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n",
 2743                     error);
 2744                 xl_stop(sc);
 2745                 return;
 2746         }
 2747 
 2748         /*
 2749          * Set the TX freethresh value.
 2750          * Note that this has no effect on 3c905B "cyclone"
 2751          * cards but is required for 3c900/3c905 "boomerang"
 2752          * cards in order to enable the download engine.
 2753          */
 2754         CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
 2755 
 2756         /* Set the TX start threshold for best performance. */
 2757         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
 2758 
 2759         /*
 2760          * If this is a 3c905B, also set the tx reclaim threshold.
 2761          * This helps cut down on the number of tx reclaim errors
 2762          * that could happen on a busy network. The chip multiplies
 2763          * the register value by 16 to obtain the actual threshold
 2764          * in bytes, so we divide by 16 when setting the value here.
 2765          * The existing threshold value can be examined by reading
 2766          * the register at offset 9 in window 5.
 2767          */
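               /*
                * Illustrative arithmetic only (the actual XL_PACKET_SIZE value
                * is defined in the register header): if XL_PACKET_SIZE were
                * 1536, the command below would carry 1536 >> 4 == 96, which
                * the chip would interpret as a 96 * 16 == 1536 byte threshold.
                */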
 2768         if (sc->xl_type == XL_TYPE_905B) {
 2769                 CSR_WRITE_2(sc, XL_COMMAND,
 2770                     XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
 2771         }
 2772 
 2773         /* Set RX filter bits. */
 2774         xl_rxfilter(sc);
 2775 
 2776         /*
 2777          * Load the address of the RX list. We have to
 2778          * stall the upload engine before we can manipulate
 2779          * the uplist pointer register, then unstall it when
 2780          * we're finished. We also have to wait for the
 2781          * stall command to complete before proceeding.
 2782          * Note that we have to do this after any RX resets
 2783          * have completed since the uplist register is cleared
 2784          * by a reset.
 2785          */
 2786         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 2787         xl_wait(sc);
 2788         CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
 2789         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 2790         xl_wait(sc);
 2791 
 2792         if (sc->xl_type == XL_TYPE_905B) {
 2793                 /* Set polling interval */
 2794                 CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
 2795                 /* Load the address of the TX list */
 2796                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
 2797                 xl_wait(sc);
 2798                 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2799                     sc->xl_cdata.xl_tx_chain[0].xl_phys);
 2800                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2801                 xl_wait(sc);
 2802         }
 2803 
 2804         /*
 2805          * If the coax transceiver is on, make sure to enable
 2806          * the DC-DC converter.
 2807          */
 2808         XL_SEL_WIN(3);
 2809         if (sc->xl_xcvr == XL_XCVR_COAX)
 2810                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
 2811         else
 2812                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
 2813 
 2814         /*
  2815          * Increase the packet size to allow reception of 802.1Q or ISL packets.
 2816          * For the 3c90x chip, set the 'allow large packets' bit in the MAC
 2817          * control register. For 3c90xB/C chips, use the RX packet size
 2818          * register.
 2819          */
 2820 
 2821         if (sc->xl_type == XL_TYPE_905B)
 2822                 CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
 2823         else {
 2824                 u_int8_t macctl;
 2825                 macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
 2826                 macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
 2827                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
 2828         }
 2829 
 2830         /* Clear out the stats counters. */
 2831         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
 2832         xl_stats_update(sc);
 2833         XL_SEL_WIN(4);
 2834         CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
 2835         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
 2836 
 2837         /*
 2838          * Enable interrupts.
 2839          */
 2840         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
 2841         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
 2842 #ifdef DEVICE_POLLING
 2843         /* Disable interrupts if we are polling. */
 2844         if (ifp->if_capenable & IFCAP_POLLING)
 2845                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 2846         else
 2847 #endif
 2848         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
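               /*
                * Some adapters expose a separate function-enable register
                * (flagged with XL_FLAG_FUNCREG); the write below is assumed
                * to (re)enable interrupt delivery through that register.
                */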
 2849         if (sc->xl_flags & XL_FLAG_FUNCREG)
 2850             bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
 2851 
 2852         /* Set the RX early threshold */
  2853         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >> 2));
 2854         CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
 2855 
 2856         /* Enable receiver and transmitter. */
 2857         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2858         xl_wait(sc);
 2859         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
 2860         xl_wait(sc);
 2861 
 2862         /* XXX Downcall to miibus. */
 2863         if (mii != NULL)
 2864                 mii_mediachg(mii);
 2865 
 2866         /* Select window 7 for normal operations. */
 2867         XL_SEL_WIN(7);
 2868 
 2869         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2870         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2871 
 2872         sc->xl_wdog_timer = 0;
 2873         callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc);
 2874 }
 2875 
 2876 /*
 2877  * Set media options.
 2878  */
 2879 static int
 2880 xl_ifmedia_upd(struct ifnet *ifp)
 2881 {
 2882         struct xl_softc         *sc = ifp->if_softc;
 2883         struct ifmedia          *ifm = NULL;
 2884         struct mii_data         *mii = NULL;
 2885 
 2886         XL_LOCK(sc);
 2887 
 2888         if (sc->xl_miibus != NULL)
 2889                 mii = device_get_softc(sc->xl_miibus);
 2890         if (mii == NULL)
 2891                 ifm = &sc->ifmedia;
 2892         else
 2893                 ifm = &mii->mii_media;
 2894 
 2895         switch (IFM_SUBTYPE(ifm->ifm_media)) {
 2896         case IFM_100_FX:
 2897         case IFM_10_FL:
 2898         case IFM_10_2:
 2899         case IFM_10_5:
 2900                 xl_setmode(sc, ifm->ifm_media);
 2901                 XL_UNLOCK(sc);
 2902                 return (0);
 2903         }
 2904 
 2905         if (sc->xl_media & XL_MEDIAOPT_MII ||
 2906             sc->xl_media & XL_MEDIAOPT_BTX ||
 2907             sc->xl_media & XL_MEDIAOPT_BT4) {
 2908                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2909                 xl_init_locked(sc);
 2910         } else {
 2911                 xl_setmode(sc, ifm->ifm_media);
 2912         }
 2913 
 2914         XL_UNLOCK(sc);
 2915 
 2916         return (0);
 2917 }
 2918 
 2919 /*
 2920  * Report current media status.
 2921  */
 2922 static void
 2923 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 2924 {
 2925         struct xl_softc         *sc = ifp->if_softc;
 2926         u_int32_t               icfg;
 2927         u_int16_t               status = 0;
 2928         struct mii_data         *mii = NULL;
 2929 
 2930         XL_LOCK(sc);
 2931 
 2932         if (sc->xl_miibus != NULL)
 2933                 mii = device_get_softc(sc->xl_miibus);
 2934 
 2935         XL_SEL_WIN(4);
 2936         status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
 2937 
 2938         XL_SEL_WIN(3);
 2939         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
 2940         icfg >>= XL_ICFG_CONNECTOR_BITS;
 2941 
 2942         ifmr->ifm_active = IFM_ETHER;
 2943         ifmr->ifm_status = IFM_AVALID;
 2944 
 2945         if ((status & XL_MEDIASTAT_CARRIER) == 0)
 2946                 ifmr->ifm_status |= IFM_ACTIVE;
 2947 
 2948         switch (icfg) {
 2949         case XL_XCVR_10BT:
 2950                 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
 2951                 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
 2952                         ifmr->ifm_active |= IFM_FDX;
 2953                 else
 2954                         ifmr->ifm_active |= IFM_HDX;
 2955                 break;
 2956         case XL_XCVR_AUI:
 2957                 if (sc->xl_type == XL_TYPE_905B &&
 2958                     sc->xl_media == XL_MEDIAOPT_10FL) {
 2959                         ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
 2960                         if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
 2961                                 ifmr->ifm_active |= IFM_FDX;
 2962                         else
 2963                                 ifmr->ifm_active |= IFM_HDX;
 2964                 } else
 2965                         ifmr->ifm_active = IFM_ETHER|IFM_10_5;
 2966                 break;
 2967         case XL_XCVR_COAX:
 2968                 ifmr->ifm_active = IFM_ETHER|IFM_10_2;
 2969                 break;
 2970         /*
 2971          * XXX MII and BTX/AUTO should be separate cases.
 2972          */
 2973 
 2974         case XL_XCVR_100BTX:
 2975         case XL_XCVR_AUTO:
 2976         case XL_XCVR_MII:
 2977                 if (mii != NULL) {
 2978                         mii_pollstat(mii);
 2979                         ifmr->ifm_active = mii->mii_media_active;
 2980                         ifmr->ifm_status = mii->mii_media_status;
 2981                 }
 2982                 break;
 2983         case XL_XCVR_100BFX:
 2984                 ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
 2985                 break;
 2986         default:
  2987                 if_printf(ifp, "unknown XCVR type: %u\n", icfg);
 2988                 break;
 2989         }
 2990 
 2991         XL_UNLOCK(sc);
 2992 }
 2993 
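       /*
        * Handle socket ioctl requests: interface flag changes, multicast
        * filter updates, media selection and capability changes (polling,
        * TX/RX checksum offload and magic-packet WOL).  Anything else is
        * passed on to ether_ioctl().
        */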
 2994 static int
 2995 xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 2996 {
 2997         struct xl_softc         *sc = ifp->if_softc;
 2998         struct ifreq            *ifr = (struct ifreq *) data;
 2999         int                     error = 0, mask;
 3000         struct mii_data         *mii = NULL;
 3001 
 3002         switch (command) {
 3003         case SIOCSIFFLAGS:
 3004                 XL_LOCK(sc);
 3005                 if (ifp->if_flags & IFF_UP) {
 3006                         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3007                             (ifp->if_flags ^ sc->xl_if_flags) &
 3008                             (IFF_PROMISC | IFF_ALLMULTI))
 3009                                 xl_rxfilter(sc);
 3010                         else
 3011                                 xl_init_locked(sc);
 3012                 } else {
 3013                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3014                                 xl_stop(sc);
 3015                 }
 3016                 sc->xl_if_flags = ifp->if_flags;
 3017                 XL_UNLOCK(sc);
 3018                 break;
 3019         case SIOCADDMULTI:
 3020         case SIOCDELMULTI:
 3021                 /* XXX Downcall from if_addmulti() possibly with locks held. */
 3022                 XL_LOCK(sc);
 3023                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3024                         xl_rxfilter(sc);
 3025                 XL_UNLOCK(sc);
 3026                 break;
 3027         case SIOCGIFMEDIA:
 3028         case SIOCSIFMEDIA:
 3029                 if (sc->xl_miibus != NULL)
 3030                         mii = device_get_softc(sc->xl_miibus);
 3031                 if (mii == NULL)
 3032                         error = ifmedia_ioctl(ifp, ifr,
 3033                             &sc->ifmedia, command);
 3034                 else
 3035                         error = ifmedia_ioctl(ifp, ifr,
 3036                             &mii->mii_media, command);
 3037                 break;
 3038         case SIOCSIFCAP:
 3039                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 3040 #ifdef DEVICE_POLLING
 3041                 if ((mask & IFCAP_POLLING) != 0 &&
 3042                     (ifp->if_capabilities & IFCAP_POLLING) != 0) {
 3043                         ifp->if_capenable ^= IFCAP_POLLING;
 3044                         if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
 3045                                 error = ether_poll_register(xl_poll, ifp);
 3046                                 if (error)
 3047                                         break;
 3048                                 XL_LOCK(sc);
 3049                                 /* Disable interrupts */
 3050                                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 3051                                 ifp->if_capenable |= IFCAP_POLLING;
 3052                                 XL_UNLOCK(sc);
 3053                         } else {
 3054                                 error = ether_poll_deregister(ifp);
 3055                                 /* Enable interrupts. */
 3056                                 XL_LOCK(sc);
 3057                                 CSR_WRITE_2(sc, XL_COMMAND,
 3058                                     XL_CMD_INTR_ACK | 0xFF);
 3059                                 CSR_WRITE_2(sc, XL_COMMAND,
 3060                                     XL_CMD_INTR_ENB | XL_INTRS);
 3061                                 if (sc->xl_flags & XL_FLAG_FUNCREG)
 3062                                         bus_space_write_4(sc->xl_ftag,
 3063                                             sc->xl_fhandle, 4, 0x8000);
 3064                                 XL_UNLOCK(sc);
 3065                         }
 3066                 }
 3067 #endif /* DEVICE_POLLING */
 3068                 XL_LOCK(sc);
 3069                 if ((mask & IFCAP_TXCSUM) != 0 &&
 3070                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
 3071                         ifp->if_capenable ^= IFCAP_TXCSUM;
 3072                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 3073                                 ifp->if_hwassist |= XL905B_CSUM_FEATURES;
 3074                         else
 3075                                 ifp->if_hwassist &= ~XL905B_CSUM_FEATURES;
 3076                 }
 3077                 if ((mask & IFCAP_RXCSUM) != 0 &&
 3078                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
 3079                         ifp->if_capenable ^= IFCAP_RXCSUM;
 3080                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 3081                     (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
 3082                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 3083                 XL_UNLOCK(sc);
 3084                 break;
 3085         default:
 3086                 error = ether_ioctl(ifp, command, data);
 3087                 break;
 3088         }
 3089 
 3090         return (error);
 3091 }
 3092 
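       /*
        * Transmit watchdog, called with the driver lock held (presumably
        * from the xl_tick() callout armed in xl_init_locked()).  If the
        * timer expires but the TX ring turns out to be empty, we merely
        * missed a completion interrupt, so note that and recover;
        * otherwise report the timeout, reinitialize the chip and restart
        * transmission.
        */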
 3093 static int
 3094 xl_watchdog(struct xl_softc *sc)
 3095 {
 3096         struct ifnet            *ifp = sc->xl_ifp;
 3097         u_int16_t               status = 0;
 3098         int                     misintr;
 3099 
 3100         XL_LOCK_ASSERT(sc);
 3101 
 3102         if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0)
 3103                 return (0);
 3104 
 3105         xl_rxeof(sc);
 3106         xl_txeoc(sc);
 3107         misintr = 0;
 3108         if (sc->xl_type == XL_TYPE_905B) {
 3109                 xl_txeof_90xB(sc);
 3110                 if (sc->xl_cdata.xl_tx_cnt == 0)
 3111                         misintr++;
 3112         } else {
 3113                 xl_txeof(sc);
 3114                 if (sc->xl_cdata.xl_tx_head == NULL)
 3115                         misintr++;
 3116         }
 3117         if (misintr != 0) {
 3118                 device_printf(sc->xl_dev,
 3119                     "watchdog timeout (missed Tx interrupts) -- recovering\n");
 3120                 return (0);
 3121         }
 3122 
 3123         ifp->if_oerrors++;
 3124         XL_SEL_WIN(4);
 3125         status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
 3126         device_printf(sc->xl_dev, "watchdog timeout\n");
 3127 
 3128         if (status & XL_MEDIASTAT_CARRIER)
 3129                 device_printf(sc->xl_dev,
 3130                     "no carrier - transceiver cable problem?\n");
 3131 
 3132         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3133         xl_init_locked(sc);
 3134 
 3135         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 3136                 if (sc->xl_type == XL_TYPE_905B)
 3137                         xl_start_90xB_locked(ifp);
 3138                 else
 3139                         xl_start_locked(ifp);
 3140         }
 3141 
 3142         return (EJUSTRETURN);
 3143 }
 3144 
 3145 /*
 3146  * Stop the adapter and free any mbufs allocated to the
 3147  * RX and TX lists.
 3148  */
 3149 static void
 3150 xl_stop(struct xl_softc *sc)
 3151 {
 3152         register int            i;
 3153         struct ifnet            *ifp = sc->xl_ifp;
 3154 
 3155         XL_LOCK_ASSERT(sc);
 3156 
 3157         sc->xl_wdog_timer = 0;
 3158 
 3159         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
 3160         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
 3161         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
 3162         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
 3163         xl_wait(sc);
 3164         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
 3165         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
 3166         DELAY(800);
 3167 
 3168 #ifdef foo
 3169         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 3170         xl_wait(sc);
 3171         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 3172         xl_wait(sc);
 3173 #endif
 3174 
 3175         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
 3176         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
 3177         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 3178         if (sc->xl_flags & XL_FLAG_FUNCREG)
 3179                 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
 3180 
 3181         /* Stop the stats updater. */
 3182         callout_stop(&sc->xl_tick_callout);
 3183 
 3184         /*
 3185          * Free data in the RX lists.
 3186          */
 3187         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 3188                 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
 3189                         bus_dmamap_unload(sc->xl_mtag,
 3190                             sc->xl_cdata.xl_rx_chain[i].xl_map);
 3191                         bus_dmamap_destroy(sc->xl_mtag,
 3192                             sc->xl_cdata.xl_rx_chain[i].xl_map);
 3193                         m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
 3194                         sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
 3195                 }
 3196         }
 3197         if (sc->xl_ldata.xl_rx_list != NULL)
 3198                 bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
 3199         /*
 3200          * Free the TX list buffers.
 3201          */
 3202         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 3203                 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
 3204                         bus_dmamap_unload(sc->xl_mtag,
 3205                             sc->xl_cdata.xl_tx_chain[i].xl_map);
 3206                         bus_dmamap_destroy(sc->xl_mtag,
 3207                             sc->xl_cdata.xl_tx_chain[i].xl_map);
 3208                         m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
 3209                         sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
 3210                 }
 3211         }
 3212         if (sc->xl_ldata.xl_tx_list != NULL)
 3213                 bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);
 3214 
 3215         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 3216 }
 3217 
 3218 /*
 3219  * Stop all chip I/O so that the kernel's probe routines don't
 3220  * get confused by errant DMAs when rebooting.
 3221  */
 3222 static int
 3223 xl_shutdown(device_t dev)
 3224 {
 3225 
 3226         return (xl_suspend(dev));
 3227 }
 3228 
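       /*
        * Suspend handler: stop the chip and arm Wake On LAN before the
        * system goes to sleep.
        */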
 3229 static int
 3230 xl_suspend(device_t dev)
 3231 {
 3232         struct xl_softc         *sc;
 3233 
 3234         sc = device_get_softc(dev);
 3235 
 3236         XL_LOCK(sc);
 3237         xl_stop(sc);
 3238         xl_setwol(sc);
 3239         XL_UNLOCK(sc);
 3240 
 3241         return (0);
 3242 }
 3243 
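       /*
        * Resume handler: if the interface was up, reinitialize the chip
        * after the system wakes up.
        */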
 3244 static int
 3245 xl_resume(device_t dev)
 3246 {
 3247         struct xl_softc         *sc;
 3248         struct ifnet            *ifp;
 3249 
 3250         sc = device_get_softc(dev);
 3251         ifp = sc->xl_ifp;
 3252 
 3253         XL_LOCK(sc);
 3254 
 3255         if (ifp->if_flags & IFF_UP) {
 3256                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3257                 xl_init_locked(sc);
 3258         }
 3259 
 3260         XL_UNLOCK(sc);
 3261 
 3262         return (0);
 3263 }
 3264 
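       /*
        * Program the chip for Wake On LAN: acknowledge any pending PME
        * event, arm magic-packet detection and keep the receiver running
        * if IFCAP_WOL_MAGIC is enabled, and set or clear PME_ENABLE in
        * the PCI power management status register accordingly.
        */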
 3265 static void
 3266 xl_setwol(struct xl_softc *sc)
 3267 {
 3268         struct ifnet            *ifp;
 3269         u_int16_t               cfg, pmstat;
 3270 
 3271         if ((sc->xl_flags & XL_FLAG_WOL) == 0)
 3272                 return;
 3273 
 3274         ifp = sc->xl_ifp;
 3275         XL_SEL_WIN(7);
 3276         /* Clear any pending PME events. */
 3277         CSR_READ_2(sc, XL_W7_BM_PME);
 3278         cfg = 0;
 3279         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 3280                 cfg |= XL_BM_PME_MAGIC;
 3281         CSR_WRITE_2(sc, XL_W7_BM_PME, cfg);
 3282         /* Enable RX. */
 3283         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 3284                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
 3285         /* Request PME. */
 3286         pmstat = pci_read_config(sc->xl_dev,
 3287             sc->xl_pmcap + PCIR_POWER_STATUS, 2);
 3288         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 3289                 pmstat |= PCIM_PSTAT_PMEENABLE;
 3290         else
 3291                 pmstat &= ~PCIM_PSTAT_PMEENABLE;
 3292         pci_write_config(sc->xl_dev,
 3293             sc->xl_pmcap + PCIR_POWER_STATUS, pmstat, 2);
 3294 }
