FreeBSD/Linux Kernel Cross Reference
sys/pci/if_xl.c


    1 /*-
    2  * Copyright (c) 1997, 1998, 1999
    3  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Bill Paul.
   16  * 4. Neither the name of the author nor the names of any co-contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   30  * THE POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD: releng/7.4/sys/pci/if_xl.c 216034 2010-11-29 01:50:58Z yongari $");
   35 
   36 /*
   37  * 3Com 3c90x Etherlink XL PCI NIC driver
   38  *
   39  * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
   40  * bus-master chips (3c90x cards and embedded controllers) including
   41  * the following:
   42  *
   43  * 3Com 3c900-TPO       10Mbps/RJ-45
   44  * 3Com 3c900-COMBO     10Mbps/RJ-45,AUI,BNC
   45  * 3Com 3c905-TX        10/100Mbps/RJ-45
   46  * 3Com 3c905-T4        10/100Mbps/RJ-45
   47  * 3Com 3c900B-TPO      10Mbps/RJ-45
   48  * 3Com 3c900B-COMBO    10Mbps/RJ-45,AUI,BNC
   49  * 3Com 3c900B-TPC      10Mbps/RJ-45,BNC
   50  * 3Com 3c900B-FL       10Mbps/Fiber-optic
   51  * 3Com 3c905B-COMBO    10/100Mbps/RJ-45,AUI,BNC
   52  * 3Com 3c905B-TX       10/100Mbps/RJ-45
   53  * 3Com 3c905B-FL/FX    10/100Mbps/Fiber-optic
   54  * 3Com 3c905C-TX       10/100Mbps/RJ-45 (Tornado ASIC)
   55  * 3Com 3c980-TX        10/100Mbps server adapter (Hurricane ASIC)
   56  * 3Com 3c980C-TX       10/100Mbps server adapter (Tornado ASIC)
   57  * 3Com 3cSOHO100-TX    10/100Mbps/RJ-45 (Hurricane ASIC)
   58  * 3Com 3c450-TX        10/100Mbps/RJ-45 (Tornado ASIC)
   59  * 3Com 3c555           10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
   60  * 3Com 3c556           10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
   61  * 3Com 3c556B          10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
   62  * 3Com 3c575TX         10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   63  * 3Com 3c575B          10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   64  * 3Com 3c575C          10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   65  * 3Com 3cxfem656       10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   66  * 3Com 3cxfem656b      10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   67  * 3Com 3cxfem656c      10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
   68  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
   69  * Dell on-board 3c920 10/100Mbps/RJ-45
   70  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
   71  * Dell Latitude laptop docking station embedded 3c905-TX
   72  *
   73  * Written by Bill Paul <wpaul@ctr.columbia.edu>
   74  * Electrical Engineering Department
   75  * Columbia University, New York City
   76  */
   77 /*
    78  * The 3c90x series chips use a bus-master DMA interface for transferring
   79  * packets to and from the controller chip. Some of the "vortex" cards
    80  * (3c59x) also supported a bus master mode; however, for those chips
   81  * you could only DMA packets to/from a contiguous memory buffer. For
   82  * transmission this would mean copying the contents of the queued mbuf
   83  * chain into an mbuf cluster and then DMAing the cluster. This extra
   84  * copy would sort of defeat the purpose of the bus master support for
   85  * any packet that doesn't fit into a single mbuf.
   86  *
   87  * By contrast, the 3c90x cards support a fragment-based bus master
   88  * mode where mbuf chains can be encapsulated using TX descriptors.
   89  * This is similar to other PCI chips such as the Texas Instruments
   90  * ThunderLAN and the Intel 82557/82558.
   91  *
   92  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
   93  * bus master chips because they maintain the old PIO interface for
   94  * backwards compatibility, but starting with the 3c905B and the
   95  * "cyclone" chips, the compatibility interface has been dropped.
    96  * Since using bus master DMA is a big win, we use this driver to
    97  * support the PCI "boomerang" chips as well, even though they also
    98  * work with the "vortex" driver, in order to obtain better performance.
   99  *
  100  * This driver is in the /sys/pci directory because it only supports
  101  * PCI-based NICs.
  102  */
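
       /*
        * As a rough sketch of what "fragment-based" means here (the real
        * structure layouts live in if_xlreg.h and may differ in detail;
        * the names below are purely illustrative), each descriptor holds
        * a pointer to the next descriptor, a status word, and an array of
        * bus address/length pairs, so an mbuf chain can be handed to the
        * chip without any copying:
        *
        *      struct example_frag {                   (hypothetical)
        *              u_int32_t       frag_addr;      (bus address)
        *              u_int32_t       frag_len;       (length + flags)
        *      };
        *      struct example_desc {
        *              u_int32_t               next;
        *              u_int32_t               status;
        *              struct example_frag     frag[XL_MAXFRAGS];
        *      };
        */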
  103 
  104 #ifdef HAVE_KERNEL_OPTION_HEADERS
  105 #include "opt_device_polling.h"
  106 #endif
  107 
  108 #include <sys/param.h>
  109 #include <sys/systm.h>
  110 #include <sys/sockio.h>
  111 #include <sys/endian.h>
  112 #include <sys/mbuf.h>
  113 #include <sys/kernel.h>
  114 #include <sys/module.h>
  115 #include <sys/socket.h>
  116 #include <sys/taskqueue.h>
  117 
  118 #include <net/if.h>
  119 #include <net/if_arp.h>
  120 #include <net/ethernet.h>
  121 #include <net/if_dl.h>
  122 #include <net/if_media.h>
  123 #include <net/if_types.h>
  124 
  125 #include <net/bpf.h>
  126 
  127 #include <machine/bus.h>
  128 #include <machine/resource.h>
  129 #include <sys/bus.h>
  130 #include <sys/rman.h>
  131 
  132 #include <dev/mii/mii.h>
  133 #include <dev/mii/miivar.h>
  134 
  135 #include <dev/pci/pcireg.h>
  136 #include <dev/pci/pcivar.h>
  137 
  138 MODULE_DEPEND(xl, pci, 1, 1, 1);
  139 MODULE_DEPEND(xl, ether, 1, 1, 1);
  140 MODULE_DEPEND(xl, miibus, 1, 1, 1);
  141 
  142 /* "device miibus" required.  See GENERIC if you get errors here. */
  143 #include "miibus_if.h"
  144 
  145 #include <pci/if_xlreg.h>
  146 
  147 /*
  148  * TX Checksumming is disabled by default for two reasons:
  149  * - TX Checksumming will occasionally produce corrupt packets
  150  * - TX Checksumming seems to reduce performance
  151  *
   152  * Only 905B/C cards were reported to have this problem; it is possible
   153  * that later chips _may_ be immune.
  154  */
  155 #define XL905B_TXCSUM_BROKEN    1
  156 
  157 #ifdef XL905B_TXCSUM_BROKEN
  158 #define XL905B_CSUM_FEATURES    0
  159 #else
  160 #define XL905B_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
  161 #endif
  162 
  163 /*
  164  * Various supported device vendors/types and their names.
  165  */
  166 static const struct xl_type xl_devs[] = {
  167         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
  168                 "3Com 3c900-TPO Etherlink XL" },
  169         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
  170                 "3Com 3c900-COMBO Etherlink XL" },
  171         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
  172                 "3Com 3c905-TX Fast Etherlink XL" },
  173         { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
  174                 "3Com 3c905-T4 Fast Etherlink XL" },
  175         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
  176                 "3Com 3c900B-TPO Etherlink XL" },
  177         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
  178                 "3Com 3c900B-COMBO Etherlink XL" },
  179         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
  180                 "3Com 3c900B-TPC Etherlink XL" },
  181         { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
  182                 "3Com 3c900B-FL Etherlink XL" },
  183         { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
  184                 "3Com 3c905B-TX Fast Etherlink XL" },
  185         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
  186                 "3Com 3c905B-T4 Fast Etherlink XL" },
  187         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
  188                 "3Com 3c905B-FX/SC Fast Etherlink XL" },
  189         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
  190                 "3Com 3c905B-COMBO Fast Etherlink XL" },
  191         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
  192                 "3Com 3c905C-TX Fast Etherlink XL" },
  193         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
  194                 "3Com 3c920B-EMB Integrated Fast Etherlink XL" },
  195         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM,
  196                 "3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" },
  197         { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
  198                 "3Com 3c980 Fast Etherlink XL" },
  199         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
  200                 "3Com 3c980C Fast Etherlink XL" },
  201         { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
  202                 "3Com 3cSOHO100-TX OfficeConnect" },
  203         { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
  204                 "3Com 3c450-TX HomeConnect" },
  205         { TC_VENDORID, TC_DEVICEID_HURRICANE_555,
  206                 "3Com 3c555 Fast Etherlink XL" },
  207         { TC_VENDORID, TC_DEVICEID_HURRICANE_556,
  208                 "3Com 3c556 Fast Etherlink XL" },
  209         { TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
  210                 "3Com 3c556B Fast Etherlink XL" },
  211         { TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
  212                 "3Com 3c575TX Fast Etherlink XL" },
  213         { TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
  214                 "3Com 3c575B Fast Etherlink XL" },
  215         { TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
  216                 "3Com 3c575C Fast Etherlink XL" },
  217         { TC_VENDORID, TC_DEVICEID_HURRICANE_656,
  218                 "3Com 3c656 Fast Etherlink XL" },
  219         { TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
  220                 "3Com 3c656B Fast Etherlink XL" },
  221         { TC_VENDORID, TC_DEVICEID_TORNADO_656C,
  222                 "3Com 3c656C Fast Etherlink XL" },
  223         { 0, 0, NULL }
  224 };
  225 
  226 static int xl_probe(device_t);
  227 static int xl_attach(device_t);
  228 static int xl_detach(device_t);
  229 
  230 static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
  231 static void xl_stats_update(void *);
  232 static void xl_stats_update_locked(struct xl_softc *);
  233 static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf **);
  234 static void xl_rxeof(struct xl_softc *);
  235 static void xl_rxeof_task(void *, int);
  236 static int xl_rx_resync(struct xl_softc *);
  237 static void xl_txeof(struct xl_softc *);
  238 static void xl_txeof_90xB(struct xl_softc *);
  239 static void xl_txeoc(struct xl_softc *);
  240 static void xl_intr(void *);
  241 static void xl_start(struct ifnet *);
  242 static void xl_start_locked(struct ifnet *);
  243 static void xl_start_90xB_locked(struct ifnet *);
  244 static int xl_ioctl(struct ifnet *, u_long, caddr_t);
  245 static void xl_init(void *);
  246 static void xl_init_locked(struct xl_softc *);
  247 static void xl_stop(struct xl_softc *);
  248 static int xl_watchdog(struct xl_softc *);
  249 static int xl_shutdown(device_t);
  250 static int xl_suspend(device_t);
  251 static int xl_resume(device_t);
  252 static void xl_setwol(struct xl_softc *);
  253 
  254 #ifdef DEVICE_POLLING
  255 static void xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
  256 static void xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
  257 #endif
  258 
  259 static int xl_ifmedia_upd(struct ifnet *);
  260 static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  261 
  262 static int xl_eeprom_wait(struct xl_softc *);
  263 static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
  264 static void xl_mii_sync(struct xl_softc *);
  265 static void xl_mii_send(struct xl_softc *, u_int32_t, int);
  266 static int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
  267 static int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
  268 
  269 static void xl_setcfg(struct xl_softc *);
  270 static void xl_setmode(struct xl_softc *, int);
  271 static void xl_setmulti(struct xl_softc *);
  272 static void xl_setmulti_hash(struct xl_softc *);
  273 static void xl_reset(struct xl_softc *);
  274 static int xl_list_rx_init(struct xl_softc *);
  275 static int xl_list_tx_init(struct xl_softc *);
  276 static int xl_list_tx_init_90xB(struct xl_softc *);
  277 static void xl_wait(struct xl_softc *);
  278 static void xl_mediacheck(struct xl_softc *);
  279 static void xl_choose_media(struct xl_softc *sc, int *media);
  280 static void xl_choose_xcvr(struct xl_softc *, int);
  281 static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int);
  282 #ifdef notdef
  283 static void xl_testpacket(struct xl_softc *);
  284 #endif
  285 
  286 static int xl_miibus_readreg(device_t, int, int);
  287 static int xl_miibus_writereg(device_t, int, int, int);
  288 static void xl_miibus_statchg(device_t);
  289 static void xl_miibus_mediainit(device_t);
  290 
  291 static device_method_t xl_methods[] = {
  292         /* Device interface */
  293         DEVMETHOD(device_probe,         xl_probe),
  294         DEVMETHOD(device_attach,        xl_attach),
  295         DEVMETHOD(device_detach,        xl_detach),
  296         DEVMETHOD(device_shutdown,      xl_shutdown),
  297         DEVMETHOD(device_suspend,       xl_suspend),
  298         DEVMETHOD(device_resume,        xl_resume),
  299 
  300         /* bus interface */
  301         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  302         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  303 
  304         /* MII interface */
  305         DEVMETHOD(miibus_readreg,       xl_miibus_readreg),
  306         DEVMETHOD(miibus_writereg,      xl_miibus_writereg),
  307         DEVMETHOD(miibus_statchg,       xl_miibus_statchg),
  308         DEVMETHOD(miibus_mediainit,     xl_miibus_mediainit),
  309 
  310         { 0, 0 }
  311 };
  312 
  313 static driver_t xl_driver = {
  314         "xl",
  315         xl_methods,
  316         sizeof(struct xl_softc)
  317 };
  318 
  319 static devclass_t xl_devclass;
  320 
  321 DRIVER_MODULE(xl, cardbus, xl_driver, xl_devclass, 0, 0);
  322 DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0);
  323 DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
  324 
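       /*
        * bus_dmamap_load() callback: record the bus address of the single
        * segment that was mapped so that xl_attach() can later program the
        * descriptor ring addresses into the chip.
        */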
  325 static void
  326 xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  327 {
  328         u_int32_t *paddr;
  329 
  330         paddr = arg;
  331         *paddr = segs->ds_addr;
  332 }
  333 
  334 /*
  335  * Murphy's law says that it's possible the chip can wedge and
  336  * the 'command in progress' bit may never clear. Hence, we wait
  337  * only a finite amount of time to avoid getting caught in an
  338  * infinite loop. Normally this delay routine would be a macro,
  339  * but it isn't called during normal operation so we can afford
  340  * to make it a function.
  341  */
  342 static void
  343 xl_wait(struct xl_softc *sc)
  344 {
  345         register int            i;
  346 
  347         for (i = 0; i < XL_TIMEOUT; i++) {
  348                 if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0)
  349                         break;
  350         }
  351 
  352         if (i == XL_TIMEOUT)
  353                 device_printf(sc->xl_dev, "command never completed!\n");
  354 }
  355 
  356 /*
  357  * MII access routines are provided for adapters with external
  358  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
  359  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
  360  * Note: if you don't perform the MDIO operations just right,
  361  * it's possible to end up with code that works correctly with
  362  * some chips/CPUs/processor speeds/bus speeds/etc but not
  363  * with others.
  364  */
  365 #define MII_SET(x)                                      \
  366         CSR_WRITE_2(sc, XL_W4_PHY_MGMT,                 \
  367                 CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
  368 
  369 #define MII_CLR(x)                                      \
  370         CSR_WRITE_2(sc, XL_W4_PHY_MGMT,                 \
  371                 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
  372 
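       /*
        * For reference, the bit-bang routines below clock out a standard
        * IEEE 802.3 clause 22 management frame through the XL_W4_PHY_MGMT
        * register.  A frame looks roughly like this, MSB first:
        *
        *      <32-bit preamble of 1s> <start: 01> <opcode: 10 read/01 write>
        *      <5-bit PHY address> <5-bit register address>
        *      <2-bit turnaround> <16-bit data>
        *
        * xl_mii_sync() supplies the preamble and xl_mii_send() shifts out
        * the remaining fields.
        */
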
  373 /*
   374  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
  375  */
  376 static void
  377 xl_mii_sync(struct xl_softc *sc)
  378 {
  379         register int            i;
  380 
  381         XL_SEL_WIN(4);
  382         MII_SET(XL_MII_DIR|XL_MII_DATA);
  383 
  384         for (i = 0; i < 32; i++) {
  385                 MII_SET(XL_MII_CLK);
  386                 MII_SET(XL_MII_DATA);
  387                 MII_SET(XL_MII_DATA);
  388                 MII_CLR(XL_MII_CLK);
  389                 MII_SET(XL_MII_DATA);
  390                 MII_SET(XL_MII_DATA);
  391         }
  392 }
  393 
  394 /*
  395  * Clock a series of bits through the MII.
  396  */
  397 static void
  398 xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
  399 {
  400         int                     i;
  401 
  402         XL_SEL_WIN(4);
  403         MII_CLR(XL_MII_CLK);
  404 
  405         for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
  406                 if (bits & i) {
  407                         MII_SET(XL_MII_DATA);
  408                 } else {
  409                         MII_CLR(XL_MII_DATA);
  410                 }
  411                 MII_CLR(XL_MII_CLK);
  412                 MII_SET(XL_MII_CLK);
  413         }
  414 }
  415 
  416 /*
   417  * Read a PHY register through the MII.
  418  */
  419 static int
  420 xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
  421 {
  422         int                     i, ack;
  423 
  424         /* Set up frame for RX. */
  425         frame->mii_stdelim = XL_MII_STARTDELIM;
  426         frame->mii_opcode = XL_MII_READOP;
  427         frame->mii_turnaround = 0;
  428         frame->mii_data = 0;
  429 
  430         /* Select register window 4. */
  431         XL_SEL_WIN(4);
  432 
  433         CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
  434         /* Turn on data xmit. */
  435         MII_SET(XL_MII_DIR);
  436 
  437         xl_mii_sync(sc);
  438 
  439         /* Send command/address info. */
  440         xl_mii_send(sc, frame->mii_stdelim, 2);
  441         xl_mii_send(sc, frame->mii_opcode, 2);
  442         xl_mii_send(sc, frame->mii_phyaddr, 5);
  443         xl_mii_send(sc, frame->mii_regaddr, 5);
  444 
  445         /* Idle bit */
  446         MII_CLR((XL_MII_CLK|XL_MII_DATA));
  447         MII_SET(XL_MII_CLK);
  448 
  449         /* Turn off xmit. */
  450         MII_CLR(XL_MII_DIR);
  451 
  452         /* Check for ack */
  453         MII_CLR(XL_MII_CLK);
  454         ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
  455         MII_SET(XL_MII_CLK);
  456 
  457         /*
  458          * Now try reading data bits. If the ack failed, we still
  459          * need to clock through 16 cycles to keep the PHY(s) in sync.
  460          */
  461         if (ack) {
  462                 for (i = 0; i < 16; i++) {
  463                         MII_CLR(XL_MII_CLK);
  464                         MII_SET(XL_MII_CLK);
  465                 }
  466                 goto fail;
  467         }
  468 
  469         for (i = 0x8000; i; i >>= 1) {
  470                 MII_CLR(XL_MII_CLK);
  471                 if (!ack) {
  472                         if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
  473                                 frame->mii_data |= i;
  474                 }
  475                 MII_SET(XL_MII_CLK);
  476         }
  477 
  478 fail:
  479         MII_CLR(XL_MII_CLK);
  480         MII_SET(XL_MII_CLK);
  481 
  482         return (ack ? 1 : 0);
  483 }
  484 
  485 /*
  486  * Write to a PHY register through the MII.
  487  */
  488 static int
  489 xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
  490 {
  491 
  492         /* Set up frame for TX. */
  493         frame->mii_stdelim = XL_MII_STARTDELIM;
  494         frame->mii_opcode = XL_MII_WRITEOP;
  495         frame->mii_turnaround = XL_MII_TURNAROUND;
  496 
   497         /* Select register window 4. */
  498         XL_SEL_WIN(4);
  499 
  500         /* Turn on data output. */
  501         MII_SET(XL_MII_DIR);
  502 
  503         xl_mii_sync(sc);
  504 
  505         xl_mii_send(sc, frame->mii_stdelim, 2);
  506         xl_mii_send(sc, frame->mii_opcode, 2);
  507         xl_mii_send(sc, frame->mii_phyaddr, 5);
  508         xl_mii_send(sc, frame->mii_regaddr, 5);
  509         xl_mii_send(sc, frame->mii_turnaround, 2);
  510         xl_mii_send(sc, frame->mii_data, 16);
  511 
  512         /* Idle bit. */
  513         MII_SET(XL_MII_CLK);
  514         MII_CLR(XL_MII_CLK);
  515 
  516         /* Turn off xmit. */
  517         MII_CLR(XL_MII_DIR);
  518 
  519         return (0);
  520 }
  521 
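       /*
        * miibus(4) glue: the MII layer invokes these via the DEVMETHOD
        * entries in xl_methods to read and write PHY registers.  They just
        * marshal the arguments into an xl_mii_frame and hand it to the
        * bit-bang routines above.
        */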
  522 static int
  523 xl_miibus_readreg(device_t dev, int phy, int reg)
  524 {
  525         struct xl_softc         *sc;
  526         struct xl_mii_frame     frame;
  527 
  528         sc = device_get_softc(dev);
  529 
  530         bzero((char *)&frame, sizeof(frame));
  531         frame.mii_phyaddr = phy;
  532         frame.mii_regaddr = reg;
  533 
  534         xl_mii_readreg(sc, &frame);
  535 
  536         return (frame.mii_data);
  537 }
  538 
  539 static int
  540 xl_miibus_writereg(device_t dev, int phy, int reg, int data)
  541 {
  542         struct xl_softc         *sc;
  543         struct xl_mii_frame     frame;
  544 
  545         sc = device_get_softc(dev);
  546 
  547         bzero((char *)&frame, sizeof(frame));
  548         frame.mii_phyaddr = phy;
  549         frame.mii_regaddr = reg;
  550         frame.mii_data = data;
  551 
  552         xl_mii_writereg(sc, &frame);
  553 
  554         return (0);
  555 }
  556 
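       /*
        * Called by the MII layer when the negotiated media changes: mirror
        * the PHY's duplex setting (and, on 905B parts, its flow control
        * setting) into the MAC control register in window 3.
        */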
  557 static void
  558 xl_miibus_statchg(device_t dev)
  559 {
  560         struct xl_softc         *sc;
  561         struct mii_data         *mii;
  562         uint8_t                 macctl;
  563 
  564         sc = device_get_softc(dev);
  565         mii = device_get_softc(sc->xl_miibus);
  566 
  567         xl_setcfg(sc);
  568 
  569         /* Set ASIC's duplex mode to match the PHY. */
  570         XL_SEL_WIN(3);
  571         macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
  572         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
  573                 macctl |= XL_MACCTRL_DUPLEX;
  574                 if (sc->xl_type == XL_TYPE_905B) {
  575                         if ((IFM_OPTIONS(mii->mii_media_active) &
  576                             IFM_ETH_RXPAUSE) != 0)
  577                                 macctl |= XL_MACCTRL_FLOW_CONTROL_ENB;
  578                         else
  579                                 macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB;
  580                 }
  581         } else {
  582                 macctl &= ~XL_MACCTRL_DUPLEX;
  583                 if (sc->xl_type == XL_TYPE_905B)
  584                         macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB;
  585         }
  586         CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
  587 }
  588 
  589 /*
  590  * Special support for the 3c905B-COMBO. This card has 10/100 support
  591  * plus BNC and AUI ports. This means we will have both an miibus attached
  592  * plus some non-MII media settings. In order to allow this, we have to
  593  * add the extra media to the miibus's ifmedia struct, but we can't do
  594  * that during xl_attach() because the miibus hasn't been attached yet.
  595  * So instead, we wait until the miibus probe/attach is done, at which
   596  * point we will get a callback telling us that it's safe to add our
  597  * extra media.
  598  */
  599 static void
  600 xl_miibus_mediainit(device_t dev)
  601 {
  602         struct xl_softc         *sc;
  603         struct mii_data         *mii;
  604         struct ifmedia          *ifm;
  605 
  606         sc = device_get_softc(dev);
  607         mii = device_get_softc(sc->xl_miibus);
  608         ifm = &mii->mii_media;
  609 
  610         if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) {
  611                 /*
  612                  * Check for a 10baseFL board in disguise.
  613                  */
  614                 if (sc->xl_type == XL_TYPE_905B &&
  615                     sc->xl_media == XL_MEDIAOPT_10FL) {
  616                         if (bootverbose)
  617                                 device_printf(sc->xl_dev, "found 10baseFL\n");
  618                         ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL);
  619                         ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0,
  620                             NULL);
  621                         if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
  622                                 ifmedia_add(ifm,
  623                                     IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL);
  624                 } else {
  625                         if (bootverbose)
  626                                 device_printf(sc->xl_dev, "found AUI\n");
  627                         ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL);
  628                 }
  629         }
  630 
  631         if (sc->xl_media & XL_MEDIAOPT_BNC) {
  632                 if (bootverbose)
  633                         device_printf(sc->xl_dev, "found BNC\n");
  634                 ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL);
  635         }
  636 }
  637 
  638 /*
  639  * The EEPROM is slow: give it time to come ready after issuing
  640  * it a command.
  641  */
  642 static int
  643 xl_eeprom_wait(struct xl_softc *sc)
  644 {
  645         int                     i;
  646 
  647         for (i = 0; i < 100; i++) {
  648                 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
  649                         DELAY(162);
  650                 else
  651                         break;
  652         }
  653 
  654         if (i == 100) {
  655                 device_printf(sc->xl_dev, "eeprom failed to come ready\n");
  656                 return (1);
  657         }
  658 
  659         return (0);
  660 }
  661 
  662 /*
  663  * Read a sequence of words from the EEPROM. Note that ethernet address
  664  * data is stored in the EEPROM in network byte order.
  665  */
  666 static int
  667 xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
  668 {
  669         int                     err = 0, i;
  670         u_int16_t               word = 0, *ptr;
  671 
  672 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
  673 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
  674         /*
  675          * XXX: WARNING! DANGER!
  676          * It's easy to accidentally overwrite the rom content!
  677          * Note: the 3c575 uses 8bit EEPROM offsets.
  678          */
  679         XL_SEL_WIN(0);
  680 
  681         if (xl_eeprom_wait(sc))
  682                 return (1);
  683 
  684         if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
  685                 off += 0x30;
  686 
  687         for (i = 0; i < cnt; i++) {
  688                 if (sc->xl_flags & XL_FLAG_8BITROM)
  689                         CSR_WRITE_2(sc, XL_W0_EE_CMD,
  690                             XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
  691                 else
  692                         CSR_WRITE_2(sc, XL_W0_EE_CMD,
  693                             XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
  694                 err = xl_eeprom_wait(sc);
  695                 if (err)
  696                         break;
  697                 word = CSR_READ_2(sc, XL_W0_EE_DATA);
  698                 ptr = (u_int16_t *)(dest + (i * 2));
  699                 if (swap)
  700                         *ptr = ntohs(word);
  701                 else
  702                         *ptr = word;
  703         }
  704 
  705         return (err ? 1 : 0);
  706 }
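
       /*
        * Typical usage (see xl_attach() below): the three-word station
        * address is read starting at XL_EE_OEM_ADR0 with byte swapping
        * enabled, since it is stored in network byte order:
        *
        *      xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1);
        */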
  707 
  708 /*
  709  * NICs older than the 3c905B have only one multicast option, which
  710  * is to enable reception of all multicast frames.
  711  */
  712 static void
  713 xl_setmulti(struct xl_softc *sc)
  714 {
  715         struct ifnet            *ifp = sc->xl_ifp;
  716         struct ifmultiaddr      *ifma;
  717         u_int8_t                rxfilt;
  718         int                     mcnt = 0;
  719 
  720         XL_LOCK_ASSERT(sc);
  721 
  722         XL_SEL_WIN(5);
  723         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
  724 
  725         if (ifp->if_flags & IFF_ALLMULTI) {
  726                 rxfilt |= XL_RXFILTER_ALLMULTI;
  727                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
  728                 return;
  729         }
  730 
  731         IF_ADDR_LOCK(ifp);
  732         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
  733                 mcnt++;
  734         IF_ADDR_UNLOCK(ifp);
  735 
  736         if (mcnt)
  737                 rxfilt |= XL_RXFILTER_ALLMULTI;
  738         else
  739                 rxfilt &= ~XL_RXFILTER_ALLMULTI;
  740 
  741         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
  742 }
  743 
  744 /*
  745  * 3c905B adapters have a hash filter that we can program.
  746  */
  747 static void
  748 xl_setmulti_hash(struct xl_softc *sc)
  749 {
  750         struct ifnet            *ifp = sc->xl_ifp;
  751         int                     h = 0, i;
  752         struct ifmultiaddr      *ifma;
  753         u_int8_t                rxfilt;
  754         int                     mcnt = 0;
  755 
  756         XL_LOCK_ASSERT(sc);
  757 
  758         XL_SEL_WIN(5);
  759         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
  760 
  761         if (ifp->if_flags & IFF_ALLMULTI) {
  762                 rxfilt |= XL_RXFILTER_ALLMULTI;
  763                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
  764                 return;
  765         } else
  766                 rxfilt &= ~XL_RXFILTER_ALLMULTI;
  767 
  768         /* first, zot all the existing hash bits */
  769         for (i = 0; i < XL_HASHFILT_SIZE; i++)
  770                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
  771 
  772         /* now program new ones */
  773         IF_ADDR_LOCK(ifp);
  774         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  775                 if (ifma->ifma_addr->sa_family != AF_LINK)
  776                         continue;
  777                 /*
  778                  * Note: the 3c905B currently only supports a 64-bit hash
  779                  * table, which means we really only need 6 bits, but the
  780                  * manual indicates that future chip revisions will have a
  781                  * 256-bit hash table, hence the routine is set up to
  782                  * calculate 8 bits of position info in case we need it some
  783                  * day.
  784                  * Note II, The Sequel: _CURRENT_ versions of the 3c905B have
  785                  * a 256 bit hash table. This means we have to use all 8 bits
  786                  * regardless. On older cards, the upper 2 bits will be
  787                  * ignored. Grrrr....
  788                  */
  789                 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  790                     ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
  791                 CSR_WRITE_2(sc, XL_COMMAND,
  792                     h | XL_CMD_RX_SET_HASH | XL_HASH_SET);
  793                 mcnt++;
  794         }
  795         IF_ADDR_UNLOCK(ifp);
  796 
  797         if (mcnt)
  798                 rxfilt |= XL_RXFILTER_MULTIHASH;
  799         else
  800                 rxfilt &= ~XL_RXFILTER_MULTIHASH;
  801 
  802         CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
  803 }
  804 
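       /*
        * Program the connector/transceiver field of the internal config
        * register in window 3 to match the selected media: MII/100baseT4
        * media get the MII transceiver, 10/100baseTX media get autoselect.
        * The coax transceiver is stopped afterwards.
        */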
  805 static void
  806 xl_setcfg(struct xl_softc *sc)
  807 {
  808         u_int32_t               icfg;
  809 
  810         /*XL_LOCK_ASSERT(sc);*/
  811 
  812         XL_SEL_WIN(3);
  813         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
  814         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  815         if (sc->xl_media & XL_MEDIAOPT_MII ||
  816                 sc->xl_media & XL_MEDIAOPT_BT4)
  817                 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
  818         if (sc->xl_media & XL_MEDIAOPT_BTX)
  819                 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
  820 
  821         CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
  822         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
  823 }
  824 
  825 static void
  826 xl_setmode(struct xl_softc *sc, int media)
  827 {
  828         u_int32_t               icfg;
  829         u_int16_t               mediastat;
  830         char                    *pmsg = "", *dmsg = "";
  831 
  832         XL_LOCK_ASSERT(sc);
  833 
  834         XL_SEL_WIN(4);
  835         mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
  836         XL_SEL_WIN(3);
  837         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
  838 
  839         if (sc->xl_media & XL_MEDIAOPT_BT) {
  840                 if (IFM_SUBTYPE(media) == IFM_10_T) {
  841                         pmsg = "10baseT transceiver";
  842                         sc->xl_xcvr = XL_XCVR_10BT;
  843                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  844                         icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
  845                         mediastat |= XL_MEDIASTAT_LINKBEAT |
  846                             XL_MEDIASTAT_JABGUARD;
  847                         mediastat &= ~XL_MEDIASTAT_SQEENB;
  848                 }
  849         }
  850 
  851         if (sc->xl_media & XL_MEDIAOPT_BFX) {
  852                 if (IFM_SUBTYPE(media) == IFM_100_FX) {
  853                         pmsg = "100baseFX port";
  854                         sc->xl_xcvr = XL_XCVR_100BFX;
  855                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  856                         icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
  857                         mediastat |= XL_MEDIASTAT_LINKBEAT;
  858                         mediastat &= ~XL_MEDIASTAT_SQEENB;
  859                 }
  860         }
  861 
  862         if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
  863                 if (IFM_SUBTYPE(media) == IFM_10_5) {
  864                         pmsg = "AUI port";
  865                         sc->xl_xcvr = XL_XCVR_AUI;
  866                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  867                         icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
  868                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  869                             XL_MEDIASTAT_JABGUARD);
  870                         mediastat |= ~XL_MEDIASTAT_SQEENB;
  871                 }
  872                 if (IFM_SUBTYPE(media) == IFM_10_FL) {
  873                         pmsg = "10baseFL transceiver";
  874                         sc->xl_xcvr = XL_XCVR_AUI;
  875                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  876                         icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
  877                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  878                             XL_MEDIASTAT_JABGUARD);
  879                         mediastat |= ~XL_MEDIASTAT_SQEENB;
  880                 }
  881         }
  882 
  883         if (sc->xl_media & XL_MEDIAOPT_BNC) {
  884                 if (IFM_SUBTYPE(media) == IFM_10_2) {
   885                         pmsg = "BNC port";
  886                         sc->xl_xcvr = XL_XCVR_COAX;
  887                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  888                         icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
  889                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  890                             XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB);
  891                 }
  892         }
  893 
  894         if ((media & IFM_GMASK) == IFM_FDX ||
  895                         IFM_SUBTYPE(media) == IFM_100_FX) {
  896                 dmsg = "full";
  897                 XL_SEL_WIN(3);
  898                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
  899         } else {
  900                 dmsg = "half";
  901                 XL_SEL_WIN(3);
  902                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
  903                         (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
  904         }
  905 
  906         if (IFM_SUBTYPE(media) == IFM_10_2)
  907                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
  908         else
  909                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
  910 
  911         CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
  912         XL_SEL_WIN(4);
  913         CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
  914 
  915         DELAY(800);
  916         XL_SEL_WIN(7);
  917 
  918         device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg);
  919 }
  920 
  921 static void
  922 xl_reset(struct xl_softc *sc)
  923 {
  924         register int            i;
  925 
  926         XL_LOCK_ASSERT(sc);
  927 
  928         XL_SEL_WIN(0);
  929         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
  930             ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
  931              XL_RESETOPT_DISADVFD:0));
  932 
  933         /*
  934          * If we're using memory mapped register mode, pause briefly
  935          * after issuing the reset command before trying to access any
  936          * other registers. With my 3c575C cardbus card, failing to do
  937          * this results in the system locking up while trying to poll
  938          * the command busy bit in the status register.
  939          */
  940         if (sc->xl_flags & XL_FLAG_USE_MMIO)
  941                 DELAY(100000);
  942 
  943         for (i = 0; i < XL_TIMEOUT; i++) {
  944                 DELAY(10);
  945                 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
  946                         break;
  947         }
  948 
  949         if (i == XL_TIMEOUT)
  950                 device_printf(sc->xl_dev, "reset didn't complete\n");
  951 
  952         /* Reset TX and RX. */
   953         /*
   954          * Note: the RX reset takes an absurd amount of time on newer
   955          * versions of the Tornado chips such as those on the 3c905CX
   956          * and newer 3c908C cards. We wait an extra amount of time so
   957          * that xl_wait() doesn't complain and annoy the users.
   958          */
  959         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
  960         DELAY(100000);
  961         xl_wait(sc);
  962         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
  963         xl_wait(sc);
  964 
  965         if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
  966             sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
  967                 XL_SEL_WIN(2);
  968                 CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS,
  969                     CSR_READ_2(sc, XL_W2_RESET_OPTIONS) |
  970                     ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ?
  971                     XL_RESETOPT_INVERT_LED : 0) |
  972                     ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ?
  973                     XL_RESETOPT_INVERT_MII : 0));
  974         }
  975 
  976         /* Wait a little while for the chip to get its brains in order. */
  977         DELAY(100000);
  978 }
  979 
  980 /*
  981  * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
  982  * IDs against our list and return a device name if we find a match.
  983  */
  984 static int
  985 xl_probe(device_t dev)
  986 {
  987         const struct xl_type    *t;
  988 
  989         t = xl_devs;
  990 
  991         while (t->xl_name != NULL) {
  992                 if ((pci_get_vendor(dev) == t->xl_vid) &&
  993                     (pci_get_device(dev) == t->xl_did)) {
  994                         device_set_desc(dev, t->xl_name);
  995                         return (BUS_PROBE_DEFAULT);
  996                 }
  997                 t++;
  998         }
  999 
 1000         return (ENXIO);
 1001 }
 1002 
 1003 /*
 1004  * This routine is a kludge to work around possible hardware faults
 1005  * or manufacturing defects that can cause the media options register
 1006  * (or reset options register, as it's called for the first generation
 1007  * 3c90x adapters) to return an incorrect result. I have encountered
 1008  * one Dell Latitude laptop docking station with an integrated 3c905-TX
 1009  * which doesn't have any of the 'mediaopt' bits set. This screws up
 1010  * the attach routine pretty badly because it doesn't know what media
 1011  * to look for. If we find ourselves in this predicament, this routine
 1012  * will try to guess the media options values and warn the user of a
 1013  * possible manufacturing defect with his adapter/system/whatever.
 1014  */
 1015 static void
 1016 xl_mediacheck(struct xl_softc *sc)
 1017 {
 1018 
 1019         /*
 1020          * If some of the media options bits are set, assume they are
 1021          * correct. If not, try to figure it out down below.
 1022          * XXX I should check for 10baseFL, but I don't have an adapter
 1023          * to test with.
 1024          */
 1025         if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
 1026                 /*
 1027                  * Check the XCVR value. If it's not in the normal range
 1028                  * of values, we need to fake it up here.
 1029                  */
 1030                 if (sc->xl_xcvr <= XL_XCVR_AUTO)
 1031                         return;
 1032                 else {
 1033                         device_printf(sc->xl_dev,
 1034                             "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr);
 1035                         device_printf(sc->xl_dev,
 1036                             "choosing new default based on card type\n");
 1037                 }
 1038         } else {
 1039                 if (sc->xl_type == XL_TYPE_905B &&
 1040                     sc->xl_media & XL_MEDIAOPT_10FL)
 1041                         return;
 1042                 device_printf(sc->xl_dev,
 1043 "WARNING: no media options bits set in the media options register!!\n");
 1044                 device_printf(sc->xl_dev,
 1045 "this could be a manufacturing defect in your adapter or system\n");
 1046                 device_printf(sc->xl_dev,
 1047 "attempting to guess media type; you should probably consult your vendor\n");
 1048         }
 1049 
 1050         xl_choose_xcvr(sc, 1);
 1051 }
 1052 
 1053 static void
 1054 xl_choose_xcvr(struct xl_softc *sc, int verbose)
 1055 {
 1056         u_int16_t               devid;
 1057 
 1058         /*
 1059          * Read the device ID from the EEPROM.
 1060          * This is what's loaded into the PCI device ID register, so it has
  1061  * to be correct; otherwise we wouldn't have gotten this far.
 1062          */
 1063         xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
 1064 
 1065         switch (devid) {
 1066         case TC_DEVICEID_BOOMERANG_10BT:        /* 3c900-TPO */
 1067         case TC_DEVICEID_KRAKATOA_10BT:         /* 3c900B-TPO */
 1068                 sc->xl_media = XL_MEDIAOPT_BT;
 1069                 sc->xl_xcvr = XL_XCVR_10BT;
 1070                 if (verbose)
 1071                         device_printf(sc->xl_dev,
 1072                             "guessing 10BaseT transceiver\n");
 1073                 break;
 1074         case TC_DEVICEID_BOOMERANG_10BT_COMBO:  /* 3c900-COMBO */
 1075         case TC_DEVICEID_KRAKATOA_10BT_COMBO:   /* 3c900B-COMBO */
 1076                 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
 1077                 sc->xl_xcvr = XL_XCVR_10BT;
 1078                 if (verbose)
 1079                         device_printf(sc->xl_dev,
 1080                             "guessing COMBO (AUI/BNC/TP)\n");
 1081                 break;
 1082         case TC_DEVICEID_KRAKATOA_10BT_TPC:     /* 3c900B-TPC */
 1083                 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
 1084                 sc->xl_xcvr = XL_XCVR_10BT;
 1085                 if (verbose)
 1086                         device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n");
 1087                 break;
 1088         case TC_DEVICEID_CYCLONE_10FL:          /* 3c900B-FL */
 1089                 sc->xl_media = XL_MEDIAOPT_10FL;
 1090                 sc->xl_xcvr = XL_XCVR_AUI;
 1091                 if (verbose)
 1092                         device_printf(sc->xl_dev, "guessing 10baseFL\n");
 1093                 break;
 1094         case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
 1095         case TC_DEVICEID_HURRICANE_555:         /* 3c555 */
 1096         case TC_DEVICEID_HURRICANE_556:         /* 3c556 */
 1097         case TC_DEVICEID_HURRICANE_556B:        /* 3c556B */
 1098         case TC_DEVICEID_HURRICANE_575A:        /* 3c575TX */
 1099         case TC_DEVICEID_HURRICANE_575B:        /* 3c575B */
 1100         case TC_DEVICEID_HURRICANE_575C:        /* 3c575C */
 1101         case TC_DEVICEID_HURRICANE_656:         /* 3c656 */
 1102         case TC_DEVICEID_HURRICANE_656B:        /* 3c656B */
 1103         case TC_DEVICEID_TORNADO_656C:          /* 3c656C */
 1104         case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
 1105         case TC_DEVICEID_TORNADO_10_100BT_920B_WNM:     /* 3c920B-EMB-WNM */
 1106                 sc->xl_media = XL_MEDIAOPT_MII;
 1107                 sc->xl_xcvr = XL_XCVR_MII;
 1108                 if (verbose)
 1109                         device_printf(sc->xl_dev, "guessing MII\n");
 1110                 break;
 1111         case TC_DEVICEID_BOOMERANG_100BT4:      /* 3c905-T4 */
 1112         case TC_DEVICEID_CYCLONE_10_100BT4:     /* 3c905B-T4 */
 1113                 sc->xl_media = XL_MEDIAOPT_BT4;
 1114                 sc->xl_xcvr = XL_XCVR_MII;
 1115                 if (verbose)
 1116                         device_printf(sc->xl_dev, "guessing 100baseT4/MII\n");
 1117                 break;
 1118         case TC_DEVICEID_HURRICANE_10_100BT:    /* 3c905B-TX */
  1119         case TC_DEVICEID_HURRICANE_10_100BT_SERV: /* 3c980-TX */
 1120         case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */
 1121         case TC_DEVICEID_HURRICANE_SOHO100TX:   /* 3cSOHO100-TX */
 1122         case TC_DEVICEID_TORNADO_10_100BT:      /* 3c905C-TX */
 1123         case TC_DEVICEID_TORNADO_HOMECONNECT:   /* 3c450-TX */
 1124                 sc->xl_media = XL_MEDIAOPT_BTX;
 1125                 sc->xl_xcvr = XL_XCVR_AUTO;
 1126                 if (verbose)
 1127                         device_printf(sc->xl_dev, "guessing 10/100 internal\n");
 1128                 break;
 1129         case TC_DEVICEID_CYCLONE_10_100_COMBO:  /* 3c905B-COMBO */
 1130                 sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
 1131                 sc->xl_xcvr = XL_XCVR_AUTO;
 1132                 if (verbose)
 1133                         device_printf(sc->xl_dev,
 1134                             "guessing 10/100 plus BNC/AUI\n");
 1135                 break;
 1136         default:
 1137                 device_printf(sc->xl_dev,
 1138                     "unknown device ID: %x -- defaulting to 10baseT\n", devid);
 1139                 sc->xl_media = XL_MEDIAOPT_BT;
 1140                 break;
 1141         }
 1142 }
 1143 
 1144 /*
 1145  * Attach the interface. Allocate softc structures, do ifmedia
 1146  * setup and ethernet/BPF attach.
 1147  */
 1148 static int
 1149 xl_attach(device_t dev)
 1150 {
 1151         u_char                  eaddr[ETHER_ADDR_LEN];
 1152         u_int16_t               sinfo2, xcvr[2];
 1153         struct xl_softc         *sc;
 1154         struct ifnet            *ifp;
 1155         int                     media, pmcap;
 1156         int                     error = 0, phy, rid, res, unit;
 1157         uint16_t                did;
 1158 
 1159         sc = device_get_softc(dev);
 1160         sc->xl_dev = dev;
 1161 
 1162         unit = device_get_unit(dev);
 1163 
 1164         mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1165             MTX_DEF);
 1166         ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
 1167 
 1168         did = pci_get_device(dev);
 1169 
 1170         sc->xl_flags = 0;
 1171         if (did == TC_DEVICEID_HURRICANE_555)
 1172                 sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
 1173         if (did == TC_DEVICEID_HURRICANE_556 ||
 1174             did == TC_DEVICEID_HURRICANE_556B)
 1175                 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
 1176                     XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
 1177                     XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
 1178         if (did == TC_DEVICEID_HURRICANE_555 ||
 1179             did == TC_DEVICEID_HURRICANE_556)
 1180                 sc->xl_flags |= XL_FLAG_8BITROM;
 1181         if (did == TC_DEVICEID_HURRICANE_556B)
 1182                 sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
 1183 
 1184         if (did == TC_DEVICEID_HURRICANE_575B ||
 1185             did == TC_DEVICEID_HURRICANE_575C ||
 1186             did == TC_DEVICEID_HURRICANE_656B ||
 1187             did == TC_DEVICEID_TORNADO_656C)
 1188                 sc->xl_flags |= XL_FLAG_FUNCREG;
 1189         if (did == TC_DEVICEID_HURRICANE_575A ||
 1190             did == TC_DEVICEID_HURRICANE_575B ||
 1191             did == TC_DEVICEID_HURRICANE_575C ||
 1192             did == TC_DEVICEID_HURRICANE_656B ||
 1193             did == TC_DEVICEID_TORNADO_656C)
 1194                 sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 |
 1195                   XL_FLAG_8BITROM;
 1196         if (did == TC_DEVICEID_HURRICANE_656)
 1197                 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
 1198         if (did == TC_DEVICEID_HURRICANE_575B)
 1199                 sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
 1200         if (did == TC_DEVICEID_HURRICANE_575C)
 1201                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
 1202         if (did == TC_DEVICEID_TORNADO_656C)
 1203                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
 1204         if (did == TC_DEVICEID_HURRICANE_656 ||
 1205             did == TC_DEVICEID_HURRICANE_656B)
 1206                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
 1207                     XL_FLAG_INVERT_LED_PWR;
 1208         if (did == TC_DEVICEID_TORNADO_10_100BT_920B ||
 1209             did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM)
 1210                 sc->xl_flags |= XL_FLAG_PHYOK;
 1211 
 1212         switch (did) {
 1213         case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
 1214         case TC_DEVICEID_HURRICANE_575A:
 1215         case TC_DEVICEID_HURRICANE_575B:
 1216         case TC_DEVICEID_HURRICANE_575C:
 1217                 sc->xl_flags |= XL_FLAG_NO_MMIO;
 1218                 break;
 1219         default:
 1220                 break;
 1221         }
 1222 
 1223         /*
 1224          * Map control/status registers.
 1225          */
 1226         pci_enable_busmaster(dev);
 1227 
 1228         if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
 1229                 rid = XL_PCI_LOMEM;
 1230                 res = SYS_RES_MEMORY;
 1231 
 1232                 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
 1233         }
 1234 
 1235         if (sc->xl_res != NULL) {
 1236                 sc->xl_flags |= XL_FLAG_USE_MMIO;
 1237                 if (bootverbose)
 1238                         device_printf(dev, "using memory mapped I/O\n");
 1239         } else {
 1240                 rid = XL_PCI_LOIO;
 1241                 res = SYS_RES_IOPORT;
 1242                 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
 1243                 if (sc->xl_res == NULL) {
 1244                         device_printf(dev, "couldn't map ports/memory\n");
 1245                         error = ENXIO;
 1246                         goto fail;
 1247                 }
 1248                 if (bootverbose)
 1249                         device_printf(dev, "using port I/O\n");
 1250         }
 1251 
 1252         sc->xl_btag = rman_get_bustag(sc->xl_res);
 1253         sc->xl_bhandle = rman_get_bushandle(sc->xl_res);
 1254 
 1255         if (sc->xl_flags & XL_FLAG_FUNCREG) {
 1256                 rid = XL_PCI_FUNCMEM;
 1257                 sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 1258                     RF_ACTIVE);
 1259 
 1260                 if (sc->xl_fres == NULL) {
 1261                         device_printf(dev, "couldn't map funcreg memory\n");
 1262                         error = ENXIO;
 1263                         goto fail;
 1264                 }
 1265 
 1266                 sc->xl_ftag = rman_get_bustag(sc->xl_fres);
 1267                 sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
 1268         }
 1269 
 1270         /* Allocate interrupt */
 1271         rid = 0;
 1272         sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 1273             RF_SHAREABLE | RF_ACTIVE);
 1274         if (sc->xl_irq == NULL) {
 1275                 device_printf(dev, "couldn't map interrupt\n");
 1276                 error = ENXIO;
 1277                 goto fail;
 1278         }
 1279 
 1280         /* Initialize interface name. */
 1281         ifp = sc->xl_ifp = if_alloc(IFT_ETHER);
 1282         if (ifp == NULL) {
 1283                 device_printf(dev, "can not if_alloc()\n");
 1284                 error = ENOSPC;
 1285                 goto fail;
 1286         }
 1287         ifp->if_softc = sc;
 1288         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1289 
 1290         /* Reset the adapter. */
 1291         XL_LOCK(sc);
 1292         xl_reset(sc);
 1293         XL_UNLOCK(sc);
 1294 
 1295         /*
 1296          * Get station address from the EEPROM.
 1297          */
 1298         if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
 1299                 device_printf(dev, "failed to read station address\n");
 1300                 error = ENXIO;
 1301                 goto fail;
 1302         }
 1303 
 1304         sc->xl_unit = unit;
 1305         callout_init_mtx(&sc->xl_stat_callout, &sc->xl_mtx, 0);
 1306         TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
 1307 
 1308         /*
 1309          * Now allocate a tag for the DMA descriptor lists and a chunk
 1310          * of DMA-able memory based on the tag.  Also obtain the DMA
 1311          * addresses of the RX and TX ring, which we'll need later.
 1312          * All of our lists are allocated as a contiguous block
 1313          * of memory.
 1314          */
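               /*
                * The tags below ask for 8-byte alignment, a single segment
                * of XL_RX_LIST_SZ/XL_TX_LIST_SZ bytes, and addresses below
                * 4GB (BUS_SPACE_MAXADDR_32BIT), since the chip's descriptor
                * pointers are only 32 bits wide.
                */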
 1315         error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 1316             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1317             XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
 1318             &sc->xl_ldata.xl_rx_tag);
 1319         if (error) {
 1320                 device_printf(dev, "failed to allocate rx dma tag\n");
 1321                 goto fail;
 1322         }
 1323 
 1324         error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
 1325             (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 1326             &sc->xl_ldata.xl_rx_dmamap);
 1327         if (error) {
 1328                 device_printf(dev, "no memory for rx list buffers!\n");
 1329                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1330                 sc->xl_ldata.xl_rx_tag = NULL;
 1331                 goto fail;
 1332         }
 1333 
 1334         error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
 1335             sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
 1336             XL_RX_LIST_SZ, xl_dma_map_addr,
 1337             &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
 1338         if (error) {
 1339                 device_printf(dev, "cannot get dma address of the rx ring!\n");
 1340                 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
 1341                     sc->xl_ldata.xl_rx_dmamap);
 1342                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1343                 sc->xl_ldata.xl_rx_tag = NULL;
 1344                 goto fail;
 1345         }
 1346 
 1347         error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 1348             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1349             XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
 1350             &sc->xl_ldata.xl_tx_tag);
 1351         if (error) {
 1352                 device_printf(dev, "failed to allocate tx dma tag\n");
 1353                 goto fail;
 1354         }
 1355 
 1356         error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
 1357             (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 1358             &sc->xl_ldata.xl_tx_dmamap);
 1359         if (error) {
 1360                 device_printf(dev, "no memory for list buffers!\n");
 1361                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1362                 sc->xl_ldata.xl_tx_tag = NULL;
 1363                 goto fail;
 1364         }
 1365 
 1366         error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
 1367             sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
 1368             XL_TX_LIST_SZ, xl_dma_map_addr,
 1369             &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
 1370         if (error) {
 1371                 device_printf(dev, "cannot get dma address of the tx ring!\n");
 1372                 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
 1373                     sc->xl_ldata.xl_tx_dmamap);
 1374                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1375                 sc->xl_ldata.xl_tx_tag = NULL;
 1376                 goto fail;
 1377         }
 1378 
 1379         /*
 1380          * Allocate a DMA tag for the mapping of mbufs.
 1381          */
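                /*
                 * Unlike the descriptor-list tags above, this tag permits
                 * up to XL_MAXFRAGS segments of at most MCLBYTES each, so
                 * a TX mbuf chain can usually be loaded without first
                 * copying it into one contiguous buffer.
                 */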
 1382         error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 1383             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1384             MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
 1385             NULL, &sc->xl_mtag);
 1386         if (error) {
 1387                 device_printf(dev, "failed to allocate mbuf dma tag\n");
 1388                 goto fail;
 1389         }
 1390 
 1391         /* We need a spare DMA map for the RX ring. */
 1392         error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
 1393         if (error)
 1394                 goto fail;
 1395 
 1396         /*
 1397          * Figure out the card type. 3c905B adapters have the
 1398          * 'supportsNoTxLength' bit set in the capabilities
 1399          * word in the EEPROM.
 1400          * Note: my 3c575C cardbus card lies. It returns a value
 1401          * of 0x1578 for its capabilities word, which is somewhat
 1402          * nonsensical. Another way to distinguish a 3c90x chip
 1403          * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
 1404          * bit. This will only be set for 3c90x boomerang chips.
 1405          */
 1406         xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
 1407         if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
 1408             !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
 1409                 sc->xl_type = XL_TYPE_905B;
 1410         else
 1411                 sc->xl_type = XL_TYPE_90X;
 1412 
 1413         /* Check availability of WOL. */
 1414         if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0 &&
 1415             pci_find_extcap(dev, PCIY_PMG, &pmcap) == 0) {
 1416                 sc->xl_pmcap = pmcap;
 1417                 sc->xl_flags |= XL_FLAG_WOL;
 1418                 sinfo2 = 0;
 1419                 xl_read_eeprom(sc, (caddr_t)&sinfo2, XL_EE_SOFTINFO2, 1, 0);
 1420                 if ((sinfo2 & XL_SINFO2_AUX_WOL_CON) == 0 && bootverbose)
 1421                         device_printf(dev,
 1422                             "No auxiliary remote wakeup connector!\n");
 1423         }
 1424 
 1425         /* Set the TX start threshold for best performance. */
 1426         sc->xl_tx_thresh = XL_MIN_FRAMELEN;
 1427 
 1428         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1429         ifp->if_ioctl = xl_ioctl;
 1430         ifp->if_capabilities = IFCAP_VLAN_MTU;
 1431         if (sc->xl_type == XL_TYPE_905B) {
 1432                 ifp->if_hwassist = XL905B_CSUM_FEATURES;
 1433 #ifdef XL905B_TXCSUM_BROKEN
 1434                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1435 #else
 1436                 ifp->if_capabilities |= IFCAP_HWCSUM;
 1437 #endif
 1438         }
 1439         if ((sc->xl_flags & XL_FLAG_WOL) != 0)
 1440                 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
 1441         ifp->if_capenable = ifp->if_capabilities;
 1442 #ifdef DEVICE_POLLING
 1443         ifp->if_capabilities |= IFCAP_POLLING;
 1444 #endif
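        /*
         * Note that IFCAP_POLLING is added after if_capenable has been
         * latched above, so polling is advertised as a capability but is
         * not enabled until the administrator turns it on.
         */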
 1445         ifp->if_start = xl_start;
 1446         ifp->if_init = xl_init;
 1447         IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
 1448         ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1;
 1449         IFQ_SET_READY(&ifp->if_snd);
 1450 
 1451         /*
 1452          * Now we have to see what sort of media we have.
 1453          * This includes probing for an MII interface and a
 1454          * possible PHY.
 1455          */
 1456         XL_SEL_WIN(3);
 1457         sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
 1458         if (bootverbose)
 1459                 device_printf(dev, "media options word: %x\n", sc->xl_media);
 1460 
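        /*
         * The default connector (transceiver) selection lives in the
         * EEPROM's internal-config image: the two 16-bit words read below
         * are combined into one 32-bit value, then the connector field is
         * extracted by masking with XL_ICFG_CONNECTOR_MASK and shifting
         * right by XL_ICFG_CONNECTOR_BITS, leaving an XL_XCVR_* value in
         * sc->xl_xcvr.
         */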
 1461         xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
 1462         sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
 1463         sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
 1464         sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
 1465 
 1466         xl_mediacheck(sc);
 1467 
 1468         if (sc->xl_media & XL_MEDIAOPT_MII ||
 1469             sc->xl_media & XL_MEDIAOPT_BTX ||
 1470             sc->xl_media & XL_MEDIAOPT_BT4) {
 1471                 if (bootverbose)
 1472                         device_printf(dev, "found MII/AUTO\n");
 1473                 xl_setcfg(sc);
 1474                 /*
 1475                  * Attach PHYs only at MII address 24 if !XL_FLAG_PHYOK.
 1476                  * This is to guard against problems with certain 3Com ASIC
 1477                  * revisions that incorrectly map the internal transceiver
 1478                  * control registers at all MII addresses.
 1479                  */
 1480                 phy = MII_PHY_ANY;
 1481                 if ((sc->xl_flags & XL_FLAG_PHYOK) == 0)
 1482                         phy = 24;
 1483                 error = mii_attach(dev, &sc->xl_miibus, ifp, xl_ifmedia_upd,
 1484                     xl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
 1485                     sc->xl_type == XL_TYPE_905B ? MIIF_DOPAUSE : 0);
 1486                 if (error != 0) {
 1487                         device_printf(dev, "attaching PHYs failed\n");
 1488                         goto fail;
 1489                 }
 1490                 goto done;
 1491         }
 1492 
 1493         /*
 1494          * Sanity check. If the user has selected "auto" and this isn't
 1495          * a 10/100 card of some kind, we need to force the transceiver
 1496          * type to something sane.
 1497          */
 1498         if (sc->xl_xcvr == XL_XCVR_AUTO)
 1499                 xl_choose_xcvr(sc, bootverbose);
 1500 
 1501         /*
 1502          * Do ifmedia setup.
 1503          */
 1504         if (sc->xl_media & XL_MEDIAOPT_BT) {
 1505                 if (bootverbose)
 1506                         device_printf(dev, "found 10baseT\n");
 1507                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
 1508                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
 1509                 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
 1510                         ifmedia_add(&sc->ifmedia,
 1511                             IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
 1512         }
 1513 
 1514         if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
 1515                 /*
 1516                  * Check for a 10baseFL board in disguise.
 1517                  */
 1518                 if (sc->xl_type == XL_TYPE_905B &&
 1519                     sc->xl_media == XL_MEDIAOPT_10FL) {
 1520                         if (bootverbose)
 1521                                 device_printf(dev, "found 10baseFL\n");
 1522                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
 1523                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
 1524                             0, NULL);
 1525                         if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
 1526                                 ifmedia_add(&sc->ifmedia,
 1527                                     IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
 1528                 } else {
 1529                         if (bootverbose)
 1530                                 device_printf(dev, "found AUI\n");
 1531                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
 1532                 }
 1533         }
 1534 
 1535         if (sc->xl_media & XL_MEDIAOPT_BNC) {
 1536                 if (bootverbose)
 1537                         device_printf(dev, "found BNC\n");
 1538                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
 1539         }
 1540 
 1541         if (sc->xl_media & XL_MEDIAOPT_BFX) {
 1542                 if (bootverbose)
 1543                         device_printf(dev, "found 100baseFX\n");
 1544                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
 1545         }
 1546 
 1547         media = IFM_ETHER|IFM_100_TX|IFM_FDX;
 1548         xl_choose_media(sc, &media);
 1549 
 1550         if (sc->xl_miibus == NULL)
 1551                 ifmedia_set(&sc->ifmedia, media);
 1552 
 1553 done:
 1554         if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
 1555                 XL_SEL_WIN(0);
 1556                 CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
 1557         }
 1558 
 1559         /*
 1560          * Call MI attach routine.
 1561          */
 1562         ether_ifattach(ifp, eaddr);
 1563 
 1564         error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE,
 1565             NULL, xl_intr, sc, &sc->xl_intrhand);
 1566         if (error) {
 1567                 device_printf(dev, "couldn't set up irq\n");
 1568                 ether_ifdetach(ifp);
 1569                 goto fail;
 1570         }
 1571 
 1572 fail:
 1573         if (error)
 1574                 xl_detach(dev);
 1575 
 1576         return (error);
 1577 }
 1578 
 1579 /*
 1580  * Choose a default media.
 1581  * XXX This is a leaf function only called by xl_attach() and
 1582  *     acquires/releases the non-recursive driver mutex to
 1583  *     satisfy lock assertions.
 1584  */
 1585 static void
 1586 xl_choose_media(struct xl_softc *sc, int *media)
 1587 {
 1588 
 1589         XL_LOCK(sc);
 1590 
 1591         switch (sc->xl_xcvr) {
 1592         case XL_XCVR_10BT:
 1593                 *media = IFM_ETHER|IFM_10_T;
 1594                 xl_setmode(sc, *media);
 1595                 break;
 1596         case XL_XCVR_AUI:
 1597                 if (sc->xl_type == XL_TYPE_905B &&
 1598                     sc->xl_media == XL_MEDIAOPT_10FL) {
 1599                         *media = IFM_ETHER|IFM_10_FL;
 1600                         xl_setmode(sc, *media);
 1601                 } else {
 1602                         *media = IFM_ETHER|IFM_10_5;
 1603                         xl_setmode(sc, *media);
 1604                 }
 1605                 break;
 1606         case XL_XCVR_COAX:
 1607                 *media = IFM_ETHER|IFM_10_2;
 1608                 xl_setmode(sc, *media);
 1609                 break;
 1610         case XL_XCVR_AUTO:
 1611         case XL_XCVR_100BTX:
 1612         case XL_XCVR_MII:
 1613                 /* Chosen by miibus */
 1614                 break;
 1615         case XL_XCVR_100BFX:
 1616                 *media = IFM_ETHER|IFM_100_FX;
 1617                 break;
 1618         default:
 1619                 device_printf(sc->xl_dev, "unknown XCVR type: %d\n",
 1620                     sc->xl_xcvr);
 1621                 /*
 1622                  * This will probably be wrong, but it prevents
 1623                  * the ifmedia code from panicking.
 1624                  */
 1625                 *media = IFM_ETHER|IFM_10_T;
 1626                 break;
 1627         }
 1628 
 1629         XL_UNLOCK(sc);
 1630 }
 1631 
 1632 /*
 1633  * Shutdown hardware and free up resources. This can be called any
 1634  * time after the mutex has been initialized. It is called in both
 1635  * the error case in attach and the normal detach case so it needs
 1636  * to be careful about only freeing resources that have actually been
 1637  * allocated.
 1638  */
 1639 static int
 1640 xl_detach(device_t dev)
 1641 {
 1642         struct xl_softc         *sc;
 1643         struct ifnet            *ifp;
 1644         int                     rid, res;
 1645 
 1646         sc = device_get_softc(dev);
 1647         ifp = sc->xl_ifp;
 1648 
 1649         KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
 1650 
 1651 #ifdef DEVICE_POLLING
 1652         if (ifp && ifp->if_capenable & IFCAP_POLLING)
 1653                 ether_poll_deregister(ifp);
 1654 #endif
 1655 
 1656         if (sc->xl_flags & XL_FLAG_USE_MMIO) {
 1657                 rid = XL_PCI_LOMEM;
 1658                 res = SYS_RES_MEMORY;
 1659         } else {
 1660                 rid = XL_PCI_LOIO;
 1661                 res = SYS_RES_IOPORT;
 1662         }
 1663 
 1664         /* These should only be active if attach succeeded */
 1665         if (device_is_attached(dev)) {
 1666                 XL_LOCK(sc);
 1667                 xl_stop(sc);
 1668                 XL_UNLOCK(sc);
 1669                 taskqueue_drain(taskqueue_swi, &sc->xl_task);
 1670                 callout_drain(&sc->xl_stat_callout);
 1671                 ether_ifdetach(ifp);
 1672         }
 1673         if (sc->xl_miibus)
 1674                 device_delete_child(dev, sc->xl_miibus);
 1675         bus_generic_detach(dev);
 1676         ifmedia_removeall(&sc->ifmedia);
 1677 
 1678         if (sc->xl_intrhand)
 1679                 bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
 1680         if (sc->xl_irq)
 1681                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
 1682         if (sc->xl_fres != NULL)
 1683                 bus_release_resource(dev, SYS_RES_MEMORY,
 1684                     XL_PCI_FUNCMEM, sc->xl_fres);
 1685         if (sc->xl_res)
 1686                 bus_release_resource(dev, res, rid, sc->xl_res);
 1687 
 1688         if (ifp)
 1689                 if_free(ifp);
 1690 
 1691         if (sc->xl_mtag) {
 1692                 bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
 1693                 bus_dma_tag_destroy(sc->xl_mtag);
 1694         }
 1695         if (sc->xl_ldata.xl_rx_tag) {
 1696                 bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
 1697                     sc->xl_ldata.xl_rx_dmamap);
 1698                 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
 1699                     sc->xl_ldata.xl_rx_dmamap);
 1700                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1701         }
 1702         if (sc->xl_ldata.xl_tx_tag) {
 1703                 bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
 1704                     sc->xl_ldata.xl_tx_dmamap);
 1705                 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
 1706                     sc->xl_ldata.xl_tx_dmamap);
 1707                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1708         }
 1709 
 1710         mtx_destroy(&sc->xl_mtx);
 1711 
 1712         return (0);
 1713 }
 1714 
 1715 /*
 1716  * Initialize the transmit descriptors.
 1717  */
 1718 static int
 1719 xl_list_tx_init(struct xl_softc *sc)
 1720 {
 1721         struct xl_chain_data    *cd;
 1722         struct xl_list_data     *ld;
 1723         int                     error, i;
 1724 
 1725         XL_LOCK_ASSERT(sc);
 1726 
 1727         cd = &sc->xl_cdata;
 1728         ld = &sc->xl_ldata;
 1729         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 1730                 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
 1731                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1732                     &cd->xl_tx_chain[i].xl_map);
 1733                 if (error)
 1734                         return (error);
 1735                 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
 1736                     i * sizeof(struct xl_list);
 1737                 if (i == (XL_TX_LIST_CNT - 1))
 1738                         cd->xl_tx_chain[i].xl_next = NULL;
 1739                 else
 1740                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
 1741         }
 1742 
 1743         cd->xl_tx_free = &cd->xl_tx_chain[0];
 1744         cd->xl_tx_tail = cd->xl_tx_head = NULL;
 1745 
 1746         bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
 1747         return (0);
 1748 }
 1749 
 1750 /*
 1751  * Initialize the transmit descriptors for the 3c90xB (ring-mode) variant.
 1752  */
 1753 static int
 1754 xl_list_tx_init_90xB(struct xl_softc *sc)
 1755 {
 1756         struct xl_chain_data    *cd;
 1757         struct xl_list_data     *ld;
 1758         int                     error, i;
 1759 
 1760         XL_LOCK_ASSERT(sc);
 1761 
 1762         cd = &sc->xl_cdata;
 1763         ld = &sc->xl_ldata;
 1764         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 1765                 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
 1766                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1767                     &cd->xl_tx_chain[i].xl_map);
 1768                 if (error)
 1769                         return (error);
 1770                 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
 1771                     i * sizeof(struct xl_list);
 1772                 if (i == (XL_TX_LIST_CNT - 1))
 1773                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
 1774                 else
 1775                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
 1776                 if (i == 0)
 1777                         cd->xl_tx_chain[i].xl_prev =
 1778                             &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
 1779                 else
 1780                         cd->xl_tx_chain[i].xl_prev =
 1781                             &cd->xl_tx_chain[i - 1];
 1782         }
 1783 
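        /*
         * Descriptor 0 is flagged XL_TXSTAT_EMPTY and both the producer
         * and consumer indices start at 1 with no frames outstanding, so
         * the download engine always finds a valid (empty) descriptor at
         * the head of the freshly zeroed ring.
         */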
 1784         bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
 1785         ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
 1786 
 1787         cd->xl_tx_prod = 1;
 1788         cd->xl_tx_cons = 1;
 1789         cd->xl_tx_cnt = 0;
 1790 
 1791         bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
 1792         return (0);
 1793 }
 1794 
 1795 /*
 1796  * Initialize the RX descriptors and allocate mbufs for them. Note that
 1797  * we arrange the descriptors in a closed ring, so that the last descriptor
 1798  * points back to the first.
 1799  */
 1800 static int
 1801 xl_list_rx_init(struct xl_softc *sc)
 1802 {
 1803         struct xl_chain_data    *cd;
 1804         struct xl_list_data     *ld;
 1805         int                     error, i, next;
 1806         u_int32_t               nextptr;
 1807 
 1808         XL_LOCK_ASSERT(sc);
 1809 
 1810         cd = &sc->xl_cdata;
 1811         ld = &sc->xl_ldata;
 1812 
 1813         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 1814                 cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
 1815                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1816                     &cd->xl_rx_chain[i].xl_map);
 1817                 if (error)
 1818                         return (error);
 1819                 error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
 1820                 if (error)
 1821                         return (error);
 1822                 if (i == (XL_RX_LIST_CNT - 1))
 1823                         next = 0;
 1824                 else
 1825                         next = i + 1;
 1826                 nextptr = ld->xl_rx_dmaaddr +
 1827                     next * sizeof(struct xl_list_onefrag);
 1828                 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
 1829                 ld->xl_rx_list[i].xl_next = htole32(nextptr);
 1830         }
 1831 
 1832         bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1833         cd->xl_rx_head = &cd->xl_rx_chain[0];
 1834 
 1835         return (0);
 1836 }
 1837 
 1838 /*
 1839  * Initialize an RX descriptor and attach an MBUF cluster.
 1840  * If we fail to do so, we need to leave the old mbuf and
 1841  * the old DMA map untouched so that it can be reused.
 1842  */
 1843 static int
 1844 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
 1845 {
 1846         struct mbuf             *m_new = NULL;
 1847         bus_dmamap_t            map;
 1848         bus_dma_segment_t       segs[1];
 1849         int                     error, nseg;
 1850 
 1851         XL_LOCK_ASSERT(sc);
 1852 
 1853         m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 1854         if (m_new == NULL)
 1855                 return (ENOBUFS);
 1856 
 1857         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
 1858 
 1859         /* Force longword alignment for packet payload. */
 1860         m_adj(m_new, ETHER_ALIGN);
 1861 
 1862         error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, sc->xl_tmpmap, m_new,
 1863             segs, &nseg, BUS_DMA_NOWAIT);
 1864         if (error) {
 1865                 m_freem(m_new);
 1866                 device_printf(sc->xl_dev, "can't map mbuf (error %d)\n",
 1867                     error);
 1868                 return (error);
 1869         }
 1870         KASSERT(nseg == 1,
 1871             ("%s: too many DMA segments (%d)", __func__, nseg));
 1872 
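        /*
         * The new mbuf was loaded into the spare map (sc->xl_tmpmap), so a
         * failure above leaves the descriptor's old mbuf and map intact.
         * Now that the load has succeeded, swap the spare map with the
         * descriptor's map and keep the old one as the new spare.
         */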
 1873         bus_dmamap_unload(sc->xl_mtag, c->xl_map);
 1874         map = c->xl_map;
 1875         c->xl_map = sc->xl_tmpmap;
 1876         sc->xl_tmpmap = map;
 1877         c->xl_mbuf = m_new;
 1878         c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
 1879         c->xl_ptr->xl_status = 0;
 1880         c->xl_ptr->xl_frag.xl_addr = htole32(segs->ds_addr);
 1881         bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
 1882         return (0);
 1883 }
 1884 
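/*
 * Scan the RX ring for the next descriptor whose status word is non-zero.
 * This is used when an RX pass consumed no packets: if a descriptor with
 * pending status is found, make it the new ring head and return EAGAIN so
 * the caller runs xl_rxeof() again; return 0 if the ring is really empty.
 */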
 1885 static int
 1886 xl_rx_resync(struct xl_softc *sc)
 1887 {
 1888         struct xl_chain_onefrag *pos;
 1889         int                     i;
 1890 
 1891         XL_LOCK_ASSERT(sc);
 1892 
 1893         pos = sc->xl_cdata.xl_rx_head;
 1894 
 1895         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 1896                 if (pos->xl_ptr->xl_status)
 1897                         break;
 1898                 pos = pos->xl_next;
 1899         }
 1900 
 1901         if (i == XL_RX_LIST_CNT)
 1902                 return (0);
 1903 
 1904         sc->xl_cdata.xl_rx_head = pos;
 1905 
 1906         return (EAGAIN);
 1907 }
 1908 
 1909 /*
 1910  * A frame has been uploaded: pass the resulting mbuf chain up to
 1911  * the higher level protocols.
 1912  */
 1913 static void
 1914 xl_rxeof(struct xl_softc *sc)
 1915 {
 1916         struct mbuf             *m;
 1917         struct ifnet            *ifp = sc->xl_ifp;
 1918         struct xl_chain_onefrag *cur_rx;
 1919         int                     total_len = 0;
 1920         u_int32_t               rxstat;
 1921 
 1922         XL_LOCK_ASSERT(sc);
 1923 again:
 1924         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
 1925             BUS_DMASYNC_POSTREAD);
 1926         while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
 1927 #ifdef DEVICE_POLLING
 1928                 if (ifp->if_capenable & IFCAP_POLLING) {
 1929                         if (sc->rxcycles <= 0)
 1930                                 break;
 1931                         sc->rxcycles--;
 1932                 }
 1933 #endif
 1934                 cur_rx = sc->xl_cdata.xl_rx_head;
 1935                 sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
 1936                 total_len = rxstat & XL_RXSTAT_LENMASK;
 1937 
 1938                 /*
 1939                  * Since we have told the chip to allow large frames,
 1940                  * we need to trap giant frame errors in software. We allow
 1941                  * a little more than the normal frame size to account for
 1942                  * frames with VLAN tags.
 1943                  */
 1944                 if (total_len > XL_MAX_FRAMELEN)
 1945                         rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
 1946 
 1947                 /*
 1948                  * If an error occurs, update stats, clear the
 1949                  * status word and leave the mbuf cluster in place:
 1950                  * it should simply get re-used next time this descriptor
 1951                  * comes up in the ring.
 1952                  */
 1953                 if (rxstat & XL_RXSTAT_UP_ERROR) {
 1954                         ifp->if_ierrors++;
 1955                         cur_rx->xl_ptr->xl_status = 0;
 1956                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1957                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1958                         continue;
 1959                 }
 1960 
 1961                 /*
 1962                  * If the error bit was not set, the upload complete
 1963                  * bit should be set which means we have a valid packet.
 1964                  * If not, something truly strange has happened.
 1965                  */
 1966                 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
 1967                         device_printf(sc->xl_dev,
 1968                             "bad receive status -- packet dropped\n");
 1969                         ifp->if_ierrors++;
 1970                         cur_rx->xl_ptr->xl_status = 0;
 1971                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1972                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1973                         continue;
 1974                 }
 1975 
 1976                 /* No errors; receive the packet. */
 1977                 bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
 1978                     BUS_DMASYNC_POSTREAD);
 1979                 m = cur_rx->xl_mbuf;
 1980 
 1981                 /*
 1982                  * Try to conjure up a new mbuf cluster. If that
 1983                  * fails, it means we have an out of memory condition and
 1984                  * should leave the buffer in place and continue. This will
 1985                  * result in a lost packet, but there's little else we
 1986                  * can do in this situation.
 1987                  */
 1988                 if (xl_newbuf(sc, cur_rx)) {
 1989                         ifp->if_ierrors++;
 1990                         cur_rx->xl_ptr->xl_status = 0;
 1991                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1992                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1993                         continue;
 1994                 }
 1995                 bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1996                     sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1997 
 1998                 ifp->if_ipackets++;
 1999                 m->m_pkthdr.rcvif = ifp;
 2000                 m->m_pkthdr.len = m->m_len = total_len;
 2001 
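                /*
                 * If hardware RX checksumming is enabled, translate the
                 * chip's status bits into mbuf checksum flags.  Setting
                 * csum_data to 0xffff along with CSUM_DATA_VALID and
                 * CSUM_PSEUDO_HDR tells the stack the TCP/UDP checksum
                 * has already been verified.
                 */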
 2002                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 2003                         /* Do IP checksum checking. */
 2004                         if (rxstat & XL_RXSTAT_IPCKOK)
 2005                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2006                         if (!(rxstat & XL_RXSTAT_IPCKERR))
 2007                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2008                         if ((rxstat & XL_RXSTAT_TCPCOK &&
 2009                              !(rxstat & XL_RXSTAT_TCPCKERR)) ||
 2010                             (rxstat & XL_RXSTAT_UDPCKOK &&
 2011                              !(rxstat & XL_RXSTAT_UDPCKERR))) {
 2012                                 m->m_pkthdr.csum_flags |=
 2013                                         CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
 2014                                 m->m_pkthdr.csum_data = 0xffff;
 2015                         }
 2016                 }
 2017 
 2018                 XL_UNLOCK(sc);
 2019                 (*ifp->if_input)(ifp, m);
 2020                 XL_LOCK(sc);
 2021 
 2022                 /*
 2023                  * If we are running from the taskqueue, the interface
 2024                  * might have been stopped while we were passing the last
 2025                  * packet up the network stack.
 2026                  */
 2027                 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 2028                         return;
 2029         }
 2030 
 2031         /*
 2032          * Handle the 'end of channel' condition. When the upload
 2033          * engine hits the end of the RX ring, it will stall. This
 2034          * is our cue to flush the RX ring, reload the uplist pointer
 2035          * register and unstall the engine.
 2036          * XXX This is actually a little goofy. With the ThunderLAN
 2037          * chip, you get an interrupt when the receiver hits the end
 2038          * of the receive ring, which tells you exactly when you
 2039          * need to reload the ring pointer. Here we have to
 2040          * fake it. I'm mad at myself for not being clever enough
 2041          * to avoid the use of a goto here.
 2042          */
 2043         if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
 2044                 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
 2045                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 2046                 xl_wait(sc);
 2047                 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
 2048                 sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
 2049                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 2050                 goto again;
 2051         }
 2052 }
 2053 
 2054 /*
 2055  * Taskqueue wrapper for xl_rxeof().
 2056  */
 2057 static void
 2058 xl_rxeof_task(void *arg, int pending)
 2059 {
 2060         struct xl_softc *sc = (struct xl_softc *)arg;
 2061 
 2062         XL_LOCK(sc);
 2063         if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING)
 2064                 xl_rxeof(sc);
 2065         XL_UNLOCK(sc);
 2066 }
 2067 
 2068 /*
 2069  * A frame was downloaded to the chip. It's safe for us to clean up
 2070  * the list buffers.
 2071  */
 2072 static void
 2073 xl_txeof(struct xl_softc *sc)
 2074 {
 2075         struct xl_chain         *cur_tx;
 2076         struct ifnet            *ifp = sc->xl_ifp;
 2077 
 2078         XL_LOCK_ASSERT(sc);
 2079 
 2080         /*
 2081          * Go through our tx list and free mbufs for those
 2082          * frames that have been downloaded. Note: the 3c905B
 2083          * sets a special bit in the status word to let us
 2084          * know that a frame has been downloaded, but the
 2085          * original 3c900/3c905 adapters don't do that.
 2086          * Consequently, we have to use a different test if
 2087          * xl_type != XL_TYPE_905B.
 2088          */
 2089         while (sc->xl_cdata.xl_tx_head != NULL) {
 2090                 cur_tx = sc->xl_cdata.xl_tx_head;
 2091 
 2092                 if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
 2093                         break;
 2094 
 2095                 sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
 2096                 bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
 2097                     BUS_DMASYNC_POSTWRITE);
 2098                 bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
 2099                 m_freem(cur_tx->xl_mbuf);
 2100                 cur_tx->xl_mbuf = NULL;
 2101                 ifp->if_opackets++;
 2102                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2103 
 2104                 cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
 2105                 sc->xl_cdata.xl_tx_free = cur_tx;
 2106         }
 2107 
 2108         if (sc->xl_cdata.xl_tx_head == NULL) {
 2109                 sc->xl_wdog_timer = 0;
 2110                 sc->xl_cdata.xl_tx_tail = NULL;
 2111         } else {
 2112                 if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
 2113                         !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
 2114                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2115                                 sc->xl_cdata.xl_tx_head->xl_phys);
 2116                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2117                 }
 2118         }
 2119 }
 2120 
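/*
 * 3c90xB version of xl_txeof().  These chips set XL_TXSTAT_DL_COMPLETE in
 * each descriptor's status word, so completed frames are reclaimed by
 * walking the ring from the consumer index toward the producer index.
 */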
 2121 static void
 2122 xl_txeof_90xB(struct xl_softc *sc)
 2123 {
 2124         struct xl_chain         *cur_tx = NULL;
 2125         struct ifnet            *ifp = sc->xl_ifp;
 2126         int                     idx;
 2127 
 2128         XL_LOCK_ASSERT(sc);
 2129 
 2130         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2131             BUS_DMASYNC_POSTREAD);
 2132         idx = sc->xl_cdata.xl_tx_cons;
 2133         while (idx != sc->xl_cdata.xl_tx_prod) {
 2134                 cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2135 
 2136                 if (!(le32toh(cur_tx->xl_ptr->xl_status) &
 2137                       XL_TXSTAT_DL_COMPLETE))
 2138                         break;
 2139 
 2140                 if (cur_tx->xl_mbuf != NULL) {
 2141                         bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
 2142                             BUS_DMASYNC_POSTWRITE);
 2143                         bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
 2144                         m_freem(cur_tx->xl_mbuf);
 2145                         cur_tx->xl_mbuf = NULL;
 2146                 }
 2147 
 2148                 ifp->if_opackets++;
 2149 
 2150                 sc->xl_cdata.xl_tx_cnt--;
 2151                 XL_INC(idx, XL_TX_LIST_CNT);
 2152         }
 2153 
 2154         if (sc->xl_cdata.xl_tx_cnt == 0)
 2155                 sc->xl_wdog_timer = 0;
 2156         sc->xl_cdata.xl_tx_cons = idx;
 2157 
 2158         if (cur_tx != NULL)
 2159                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2160 }
 2161 
 2162 /*
 2163  * TX 'end of channel' interrupt handler. Actually, we should
 2164  * only get a 'TX complete' interrupt if there's a transmit error,
 2165  * so this is really the TX error handler.
 2166  */
 2167 static void
 2168 xl_txeoc(struct xl_softc *sc)
 2169 {
 2170         u_int8_t                txstat;
 2171 
 2172         XL_LOCK_ASSERT(sc);
 2173 
 2174         while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
 2175                 if (txstat & XL_TXSTATUS_UNDERRUN ||
 2176                         txstat & XL_TXSTATUS_JABBER ||
 2177                         txstat & XL_TXSTATUS_RECLAIM) {
 2178                         device_printf(sc->xl_dev,
 2179                             "transmission error: %x\n", txstat);
 2180                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2181                         xl_wait(sc);
 2182                         if (sc->xl_type == XL_TYPE_905B) {
 2183                                 if (sc->xl_cdata.xl_tx_cnt) {
 2184                                         int                     i;
 2185                                         struct xl_chain         *c;
 2186 
 2187                                         i = sc->xl_cdata.xl_tx_cons;
 2188                                         c = &sc->xl_cdata.xl_tx_chain[i];
 2189                                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2190                                             c->xl_phys);
 2191                                         CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
 2192                                 }
 2193                         } else {
 2194                                 if (sc->xl_cdata.xl_tx_head != NULL)
 2195                                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2196                                             sc->xl_cdata.xl_tx_head->xl_phys);
 2197                         }
 2198                         /*
 2199                          * Remember to set this for the
 2200                          * first generation 3c90X chips.
 2201                          */
 2202                         CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
 2203                         if (txstat & XL_TXSTATUS_UNDERRUN &&
 2204                             sc->xl_tx_thresh < XL_PACKET_SIZE) {
 2205                                 sc->xl_tx_thresh += XL_MIN_FRAMELEN;
 2206                                 device_printf(sc->xl_dev,
 2207 "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh);
 2208                         }
 2209                         CSR_WRITE_2(sc, XL_COMMAND,
 2210                             XL_CMD_TX_SET_START|sc->xl_tx_thresh);
 2211                         if (sc->xl_type == XL_TYPE_905B) {
 2212                                 CSR_WRITE_2(sc, XL_COMMAND,
 2213                                 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
 2214                         }
 2215                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2216                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2217                 } else {
 2218                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2219                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2220                 }
 2221                 /*
 2222                  * Write an arbitrary byte to the TX_STATUS register
 2223                  * to clear this interrupt/error and advance to the next.
 2224                  */
 2225                 CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
 2226         }
 2227 }
 2228 
 2229 static void
 2230 xl_intr(void *arg)
 2231 {
 2232         struct xl_softc         *sc = arg;
 2233         struct ifnet            *ifp = sc->xl_ifp;
 2234         u_int16_t               status;
 2235 
 2236         XL_LOCK(sc);
 2237 
 2238 #ifdef DEVICE_POLLING
 2239         if (ifp->if_capenable & IFCAP_POLLING) {
 2240                 XL_UNLOCK(sc);
 2241                 return;
 2242         }
 2243 #endif
 2244 
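        /*
         * A status of 0xFFFF normally means the hardware is gone (for
         * example a CardBus card that has been ejected), so treat it as
         * "no interrupts pending" and fall through.
         */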
 2245         while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS &&
 2246             status != 0xFFFF) {
 2247                 CSR_WRITE_2(sc, XL_COMMAND,
 2248                     XL_CMD_INTR_ACK|(status & XL_INTRS));
 2249 
 2250                 if (status & XL_STAT_UP_COMPLETE) {
 2251                         int     curpkts;
 2252 
 2253                         curpkts = ifp->if_ipackets;
 2254                         xl_rxeof(sc);
 2255                         if (curpkts == ifp->if_ipackets) {
 2256                                 while (xl_rx_resync(sc))
 2257                                         xl_rxeof(sc);
 2258                         }
 2259                 }
 2260 
 2261                 if (status & XL_STAT_DOWN_COMPLETE) {
 2262                         if (sc->xl_type == XL_TYPE_905B)
 2263                                 xl_txeof_90xB(sc);
 2264                         else
 2265                                 xl_txeof(sc);
 2266                 }
 2267 
 2268                 if (status & XL_STAT_TX_COMPLETE) {
 2269                         ifp->if_oerrors++;
 2270                         xl_txeoc(sc);
 2271                 }
 2272 
 2273                 if (status & XL_STAT_ADFAIL) {
 2274                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2275                         xl_init_locked(sc);
 2276                 }
 2277 
 2278                 if (status & XL_STAT_STATSOFLOW) {
 2279                         sc->xl_stats_no_timeout = 1;
 2280                         xl_stats_update_locked(sc);
 2281                         sc->xl_stats_no_timeout = 0;
 2282                 }
 2283         }
 2284 
 2285         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 2286                 if (sc->xl_type == XL_TYPE_905B)
 2287                         xl_start_90xB_locked(ifp);
 2288                 else
 2289                         xl_start_locked(ifp);
 2290         }
 2291 
 2292         XL_UNLOCK(sc);
 2293 }
 2294 
 2295 #ifdef DEVICE_POLLING
 2296 static void
 2297 xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2298 {
 2299         struct xl_softc *sc = ifp->if_softc;
 2300 
 2301         XL_LOCK(sc);
 2302         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2303                 xl_poll_locked(ifp, cmd, count);
 2304         XL_UNLOCK(sc);
 2305 }
 2306 
 2307 static void
 2308 xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2309 {
 2310         struct xl_softc *sc = ifp->if_softc;
 2311 
 2312         XL_LOCK_ASSERT(sc);
 2313 
 2314         sc->rxcycles = count;
 2315         xl_rxeof(sc);
 2316         if (sc->xl_type == XL_TYPE_905B)
 2317                 xl_txeof_90xB(sc);
 2318         else
 2319                 xl_txeof(sc);
 2320 
 2321         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 2322                 if (sc->xl_type == XL_TYPE_905B)
 2323                         xl_start_90xB_locked(ifp);
 2324                 else
 2325                         xl_start_locked(ifp);
 2326         }
 2327 
 2328         if (cmd == POLL_AND_CHECK_STATUS) {
 2329                 u_int16_t status;
 2330 
 2331                 status = CSR_READ_2(sc, XL_STATUS);
 2332                 if (status & XL_INTRS && status != 0xFFFF) {
 2333                         CSR_WRITE_2(sc, XL_COMMAND,
 2334                             XL_CMD_INTR_ACK|(status & XL_INTRS));
 2335 
 2336                         if (status & XL_STAT_TX_COMPLETE) {
 2337                                 ifp->if_oerrors++;
 2338                                 xl_txeoc(sc);
 2339                         }
 2340 
 2341                         if (status & XL_STAT_ADFAIL) {
 2342                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2343                                 xl_init_locked(sc);
 2344                         }
 2345 
 2346                         if (status & XL_STAT_STATSOFLOW) {
 2347                                 sc->xl_stats_no_timeout = 1;
 2348                                 xl_stats_update_locked(sc);
 2349                                 sc->xl_stats_no_timeout = 0;
 2350                         }
 2351                 }
 2352         }
 2353 }
 2354 #endif /* DEVICE_POLLING */
 2355 
 2356 /*
 2357  * XXX: This is an entry point for callout which needs to take the lock.
 2358  */
 2359 static void
 2360 xl_stats_update(void *xsc)
 2361 {
 2362         struct xl_softc *sc = xsc;
 2363 
 2364         XL_LOCK_ASSERT(sc);
 2365 
 2366         if (xl_watchdog(sc) == EJUSTRETURN)
 2367                 return;
 2368 
 2369         xl_stats_update_locked(sc);
 2370 }
 2371 
 2372 static void
 2373 xl_stats_update_locked(struct xl_softc *sc)
 2374 {
 2375         struct ifnet            *ifp = sc->xl_ifp;
 2376         struct xl_stats         xl_stats;
 2377         u_int8_t                *p;
 2378         int                     i;
 2379         struct mii_data         *mii = NULL;
 2380 
 2381         XL_LOCK_ASSERT(sc);
 2382 
 2383         bzero((char *)&xl_stats, sizeof(struct xl_stats));
 2384 
 2385         if (sc->xl_miibus != NULL)
 2386                 mii = device_get_softc(sc->xl_miibus);
 2387 
 2388         p = (u_int8_t *)&xl_stats;
 2389 
 2390         /* Read all the stats registers. */
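        /*
         * They are laid out back to back starting at XL_W6_CARRIER_LOST
         * and clear when read, which is also what keeps the statsoflow
         * interrupt from firing again immediately.
         */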
 2391         XL_SEL_WIN(6);
 2392 
 2393         for (i = 0; i < 16; i++)
 2394                 *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
 2395 
 2396         ifp->if_ierrors += xl_stats.xl_rx_overrun;
 2397 
 2398         ifp->if_collisions += xl_stats.xl_tx_multi_collision +
 2399             xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision;
 2400 
 2401         /*
 2402          * Boomerang and cyclone chips have an extra stats counter
 2403          * in window 4 (BadSSD). We have to read this too in order
 2404          * to clear out all the stats registers and avoid a statsoflow
 2405          * interrupt.
 2406          */
 2407         XL_SEL_WIN(4);
 2408         CSR_READ_1(sc, XL_W4_BADSSD);
 2409 
 2410         if ((mii != NULL) && (!sc->xl_stats_no_timeout))
 2411                 mii_tick(mii);
 2412 
 2413         XL_SEL_WIN(7);
 2414 
 2415         if (!sc->xl_stats_no_timeout)
 2416                 callout_reset(&sc->xl_stat_callout, hz, xl_stats_update, sc);
 2417 }
 2418 
 2419 /*
 2420  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 2421  * pointers to the fragment pointers.
 2422  */
 2423 static int
 2424 xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf **m_head)
 2425 {
 2426         struct mbuf             *m_new;
 2427         struct ifnet            *ifp = sc->xl_ifp;
 2428         int                     error, i, nseg, total_len;
 2429         u_int32_t               status;
 2430 
 2431         XL_LOCK_ASSERT(sc);
 2432 
 2433         error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head,
 2434             sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
 2435 
 2436         if (error && error != EFBIG) {
 2437                 if_printf(ifp, "can't map mbuf (error %d)\n", error);
 2438                 return (error);
 2439         }
 2440 
 2441         /*
 2442          * but we have more mbufs left in the chain. Collapse the
 2443          * chain with m_collapse(). Note that we don't
 2444          * data into an mbuf cluster. Note that we don't
 2445          * bother clearing the values in the other fragment
 2446          * pointers/counters; it wouldn't gain us anything,
 2447          * and would waste cycles.
 2448          */
 2449         if (error) {
 2450                 m_new = m_collapse(*m_head, M_DONTWAIT, XL_MAXFRAGS);
 2451                 if (m_new == NULL) {
 2452                         m_freem(*m_head);
 2453                         *m_head = NULL;
 2454                         return (ENOBUFS);
 2455                 }
 2456                 *m_head = m_new;
 2457 
 2458                 error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map,
 2459                     *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
 2460                 if (error) {
 2461                         m_freem(*m_head);
 2462                         *m_head = NULL;
 2463                         if_printf(ifp, "can't map mbuf (error %d)\n", error);
 2464                         return (error);
 2465                 }
 2466         }
 2467 
 2468         KASSERT(nseg <= XL_MAXFRAGS,
 2469             ("%s: too many DMA segments (%d)", __func__, nseg));
 2470         if (nseg == 0) {
 2471                 m_freem(*m_head);
 2472                 *m_head = NULL;
 2473                 return (EIO);
 2474         }
 2475 
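        /*
         * Copy the DMA segment list into the descriptor's fragment array.
         * The last fragment is tagged with XL_LAST_FRAG and the total
         * frame length ends up in the descriptor status word.
         */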
 2476         total_len = 0;
 2477         for (i = 0; i < nseg; i++) {
 2478                 KASSERT(sc->xl_cdata.xl_tx_segs[i].ds_len <= MCLBYTES,
 2479                     ("segment size too large"));
 2480                 c->xl_ptr->xl_frag[i].xl_addr =
 2481                     htole32(sc->xl_cdata.xl_tx_segs[i].ds_addr);
 2482                 c->xl_ptr->xl_frag[i].xl_len =
 2483                     htole32(sc->xl_cdata.xl_tx_segs[i].ds_len);
 2484                 total_len += sc->xl_cdata.xl_tx_segs[i].ds_len;
 2485         }
 2486         c->xl_ptr->xl_frag[nseg - 1].xl_len =
 2487             htole32(sc->xl_cdata.xl_tx_segs[nseg - 1].ds_len | XL_LAST_FRAG);
 2488         c->xl_ptr->xl_status = htole32(total_len);
 2489         c->xl_ptr->xl_next = 0;
 2490 
 2491         if (sc->xl_type == XL_TYPE_905B) {
 2492                 status = XL_TXSTAT_RND_DEFEAT;
 2493 
 2494 #ifndef XL905B_TXCSUM_BROKEN
 2495                 if ((*m_head)->m_pkthdr.csum_flags) {
 2496                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
 2497                                 status |= XL_TXSTAT_IPCKSUM;
 2498                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
 2499                                 status |= XL_TXSTAT_TCPCKSUM;
 2500                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
 2501                                 status |= XL_TXSTAT_UDPCKSUM;
 2502                 }
 2503 #endif
 2504                 c->xl_ptr->xl_status = htole32(status);
 2505         }
 2506 
 2507         c->xl_mbuf = *m_head;
 2508         bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
 2509         return (0);
 2510 }
 2511 
 2512 /*
 2513  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 2514  * to the mbuf data regions directly in the transmit lists. We also save a
 2515  * copy of the pointers since the transmit list fragment pointers are
 2516  * physical addresses.
 2517  */
 2518 
 2519 static void
 2520 xl_start(struct ifnet *ifp)
 2521 {
 2522         struct xl_softc         *sc = ifp->if_softc;
 2523 
 2524         XL_LOCK(sc);
 2525 
 2526         if (sc->xl_type == XL_TYPE_905B)
 2527                 xl_start_90xB_locked(ifp);
 2528         else
 2529                 xl_start_locked(ifp);
 2530 
 2531         XL_UNLOCK(sc);
 2532 }
 2533 
 2534 static void
 2535 xl_start_locked(struct ifnet *ifp)
 2536 {
 2537         struct xl_softc         *sc = ifp->if_softc;
 2538         struct mbuf             *m_head = NULL;
 2539         struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
 2540         u_int32_t               status;
 2541         int                     error;
 2542 
 2543         XL_LOCK_ASSERT(sc);
 2544 
 2545         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2546             IFF_DRV_RUNNING)
 2547                 return;
 2548         /*
 2549          * Check for an available queue slot. If there are none,
 2550          * punt.
 2551          */
 2552         if (sc->xl_cdata.xl_tx_free == NULL) {
 2553                 xl_txeoc(sc);
 2554                 xl_txeof(sc);
 2555                 if (sc->xl_cdata.xl_tx_free == NULL) {
 2556                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2557                         return;
 2558                 }
 2559         }
 2560 
 2561         start_tx = sc->xl_cdata.xl_tx_free;
 2562 
 2563         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2564             sc->xl_cdata.xl_tx_free != NULL;) {
 2565                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2566                 if (m_head == NULL)
 2567                         break;
 2568 
 2569                 /* Pick a descriptor off the free list. */
 2570                 cur_tx = sc->xl_cdata.xl_tx_free;
 2571 
 2572                 /* Pack the data into the descriptor. */
 2573                 error = xl_encap(sc, cur_tx, &m_head);
 2574                 if (error) {
 2575                         if (m_head == NULL)
 2576                                 break;
 2577                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2578                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2579                         break;
 2580                 }
 2581 
 2582                 sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
 2583                 cur_tx->xl_next = NULL;
 2584 
 2585                 /* Chain it together. */
 2586                 if (prev != NULL) {
 2587                         prev->xl_next = cur_tx;
 2588                         prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
 2589                 }
 2590                 prev = cur_tx;
 2591 
 2592                 /*
 2593                  * If there's a BPF listener, bounce a copy of this frame
 2594                  * to him.
 2595                  */
 2596                 BPF_MTAP(ifp, cur_tx->xl_mbuf);
 2597         }
 2598 
 2599         /*
 2600          * If there are no packets queued, bail.
 2601          */
 2602         if (cur_tx == NULL)
 2603                 return;
 2604 
 2605         /*
 2606          * Place the request for the download interrupt
 2607          * in the last descriptor in the chain. This way, if
 2608          * we're chaining several packets at once, we'll only
 2609          * get an interrupt once for the whole chain rather than
 2610          * once for each packet.
 2611          */
 2612         cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
 2613             XL_TXSTAT_DL_INTR);
 2614         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2615             BUS_DMASYNC_PREWRITE);
 2616 
 2617         /*
 2618          * Queue the packets. If the TX channel is clear, update
 2619          * the downlist pointer register.
 2620          */
 2621         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
 2622         xl_wait(sc);
 2623 
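        /*
         * If frames are already pending, append the new chain to the old
         * tail and clear the old tail's interrupt-request bit so only the
         * last descriptor of the combined chain interrupts; otherwise the
         * new chain becomes the head outright.
         */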
 2624         if (sc->xl_cdata.xl_tx_head != NULL) {
 2625                 sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
 2626                 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
 2627                     htole32(start_tx->xl_phys);
 2628                 status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
 2629                 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
 2630                     htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
 2631                 sc->xl_cdata.xl_tx_tail = cur_tx;
 2632         } else {
 2633                 sc->xl_cdata.xl_tx_head = start_tx;
 2634                 sc->xl_cdata.xl_tx_tail = cur_tx;
 2635         }
 2636         if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
 2637                 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);
 2638 
 2639         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2640 
 2641         XL_SEL_WIN(7);
 2642 
 2643         /*
 2644          * Set a timeout in case the chip goes out to lunch.
 2645          */
 2646         sc->xl_wdog_timer = 5;
 2647 
 2648         /*
 2649          * XXX Under certain conditions, usually on slower machines
 2650          * where interrupts may be dropped, it's possible for the
 2651          * adapter to chew up all the buffers in the receive ring
 2652          * and stall, without us being able to do anything about it.
 2653          * To guard against this, we need to make a pass over the
 2654          * RX queue to make sure there aren't any packets pending.
 2655          * Doing it here means we can flush the receive ring at the
 2656          * same time the chip is DMAing the transmit descriptors we
 2657          * just gave it.
 2658          *
 2659          * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
 2660          * nature of their chips in all their marketing literature;
 2661          * we may as well take advantage of it. :)
 2662          */
 2663         taskqueue_enqueue(taskqueue_swi, &sc->xl_task);
 2664 }
 2665 
 2666 static void
 2667 xl_start_90xB_locked(struct ifnet *ifp)
 2668 {
 2669         struct xl_softc         *sc = ifp->if_softc;
 2670         struct mbuf             *m_head = NULL;
 2671         struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
 2672         int                     error, idx;
 2673 
 2674         XL_LOCK_ASSERT(sc);
 2675 
 2676         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2677             IFF_DRV_RUNNING)
 2678                 return;
 2679 
 2680         idx = sc->xl_cdata.xl_tx_prod;
 2681         start_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2682 
 2683         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2684             sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL;) {
 2685                 if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
 2686                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2687                         break;
 2688                 }
 2689 
 2690                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2691                 if (m_head == NULL)
 2692                         break;
 2693 
 2694                 cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2695 
 2696                 /* Pack the data into the descriptor. */
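                      /*
                       * If xl_encap() fails and has already freed the mbuf
                       * (m_head comes back NULL), there is nothing to requeue;
                       * otherwise the packet is put back at the head of the
                       * send queue and the interface is marked busy.
                       */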
 2697                 error = xl_encap(sc, cur_tx, &m_head);
 2698                 if (error) {
 2699                         if (m_head == NULL)
 2700                                 break;
 2701                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2702                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2703                         break;
 2704                 }
 2705 
 2706                 /* Chain it together. */
 2707                 if (prev != NULL)
 2708                         prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
 2709                 prev = cur_tx;
 2710 
 2711                 /*
 2712                  * If there's a BPF listener, bounce a copy of this frame
 2713                  * to him.
 2714                  */
 2715                 BPF_MTAP(ifp, cur_tx->xl_mbuf);
 2716 
 2717                 XL_INC(idx, XL_TX_LIST_CNT);
 2718                 sc->xl_cdata.xl_tx_cnt++;
 2719         }
 2720 
 2721         /*
 2722          * If there are no packets queued, bail.
 2723          */
 2724         if (cur_tx == NULL)
 2725                 return;
 2726 
 2727         /*
 2728          * Place the request for the download interrupt
 2729          * in the last descriptor in the chain. This way, if
 2730          * we're chaining several packets at once, we'll only
 2731          * get an interrupt once for the whole chain rather than
 2732          * once for each packet.
 2733          */
 2734         cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
 2735             XL_TXSTAT_DL_INTR);
 2736         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2737             BUS_DMASYNC_PREWRITE);
 2738 
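              /*
               * Unlike the older chips, the 90xB needs no downlist pointer
               * update or stall/unstall here: xl_init_locked() loads the
               * ring base and enables down polling, so linking the previous
               * descriptor to the new chain is enough to start transmission.
               */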
 2739         /* Start transmission */
 2740         sc->xl_cdata.xl_tx_prod = idx;
 2741         start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
 2742 
 2743         /*
 2744          * Set a timeout in case the chip goes out to lunch.
 2745          */
 2746         sc->xl_wdog_timer = 5;
 2747 }
 2748 
 2749 static void
 2750 xl_init(void *xsc)
 2751 {
 2752         struct xl_softc         *sc = xsc;
 2753 
 2754         XL_LOCK(sc);
 2755         xl_init_locked(sc);
 2756         XL_UNLOCK(sc);
 2757 }
 2758 
 2759 static void
 2760 xl_init_locked(struct xl_softc *sc)
 2761 {
 2762         struct ifnet            *ifp = sc->xl_ifp;
 2763         int                     error, i;
 2764         u_int16_t               rxfilt = 0;
 2765         struct mii_data         *mii = NULL;
 2766 
 2767         XL_LOCK_ASSERT(sc);
 2768 
 2769         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 2770                 return;
 2771         /*
 2772          * Cancel pending I/O and free all RX/TX buffers.
 2773          */
 2774         xl_stop(sc);
 2775 
 2776         /* Reset the chip to a known state. */
 2777         xl_reset(sc);
 2778 
 2779         if (sc->xl_miibus == NULL) {
 2780                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 2781                 xl_wait(sc);
 2782         }
 2783         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2784         xl_wait(sc);
 2785         DELAY(10000);
 2786 
 2787         if (sc->xl_miibus != NULL)
 2788                 mii = device_get_softc(sc->xl_miibus);
 2789 
 2790         /*
 2791          * Clear the WOL status and disable all WOL features, as WOL
 2792          * would interfere with Rx operation under normal conditions.
 2793          */
 2794         if ((sc->xl_flags & XL_FLAG_WOL) != 0) {
 2795                 XL_SEL_WIN(7);
 2796                 CSR_READ_2(sc, XL_W7_BM_PME);
 2797                 CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
 2798         }
 2799         /* Init our MAC address */
 2800         XL_SEL_WIN(2);
 2801         for (i = 0; i < ETHER_ADDR_LEN; i++) {
 2802                 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
 2803                                 IF_LLADDR(sc->xl_ifp)[i]);
 2804         }
 2805 
 2806         /* Clear the station mask. */
 2807         for (i = 0; i < 3; i++)
 2808                 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
 2809 #ifdef notdef
 2810         /* Reset TX and RX. */
 2811         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 2812         xl_wait(sc);
 2813         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2814         xl_wait(sc);
 2815 #endif
 2816         /* Init circular RX list. */
 2817         error = xl_list_rx_init(sc);
 2818         if (error) {
 2819                 device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n",
 2820                     error);
 2821                 xl_stop(sc);
 2822                 return;
 2823         }
 2824 
 2825         /* Init TX descriptors. */
 2826         if (sc->xl_type == XL_TYPE_905B)
 2827                 error = xl_list_tx_init_90xB(sc);
 2828         else
 2829                 error = xl_list_tx_init(sc);
 2830         if (error) {
 2831                 device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n",
 2832                     error);
 2833                 xl_stop(sc);
 2834                 return;
 2835         }
 2836 
 2837         /*
 2838          * Set the TX freethresh value.
 2839          * Note that this has no effect on 3c905B "cyclone"
 2840          * cards but is required for 3c900/3c905 "boomerang"
 2841          * cards in order to enable the download engine.
 2842          */
 2843         CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
 2844 
 2845         /* Set the TX start threshold for best performance. */
 2846         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
 2847 
 2848         /*
 2849          * If this is a 3c905B, also set the tx reclaim threshold.
 2850          * This helps cut down on the number of tx reclaim errors
 2851          * that could happen on a busy network. The chip multiplies
 2852          * the register value by 16 to obtain the actual threshold
 2853          * in bytes, so we divide by 16 when setting the value here.
 2854          * The existing threshold value can be examined by reading
 2855          * the register at offset 9 in window 5.
 2856          */
 2857         if (sc->xl_type == XL_TYPE_905B) {
 2858                 CSR_WRITE_2(sc, XL_COMMAND,
 2859                     XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
 2860         }
 2861 
 2862         /* Set RX filter bits. */
 2863         XL_SEL_WIN(5);
 2864         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
 2865 
 2866         /* Set the individual bit to receive frames for this host only. */
 2867         rxfilt |= XL_RXFILTER_INDIVIDUAL;
 2868 
 2869         /* If we want promiscuous mode, set the allframes bit. */
 2870         if (ifp->if_flags & IFF_PROMISC) {
 2871                 rxfilt |= XL_RXFILTER_ALLFRAMES;
 2872                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2873         } else {
 2874                 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
 2875                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2876         }
 2877 
 2878         /*
 2879          * Set the broadcast filter bit to receive broadcast frames.
 2880          */
 2881         if (ifp->if_flags & IFF_BROADCAST) {
 2882                 rxfilt |= XL_RXFILTER_BROADCAST;
 2883                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2884         } else {
 2885                 rxfilt &= ~XL_RXFILTER_BROADCAST;
 2886                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2887         }
 2888 
 2889         /*
 2890          * Program the multicast filter, if necessary.
 2891          */
 2892         if (sc->xl_type == XL_TYPE_905B)
 2893                 xl_setmulti_hash(sc);
 2894         else
 2895                 xl_setmulti(sc);
 2896 
 2897         /*
 2898          * Load the address of the RX list. We have to
 2899          * stall the upload engine before we can manipulate
 2900          * the uplist pointer register, then unstall it when
 2901          * we're finished. We also have to wait for the
 2902          * stall command to complete before proceeding.
 2903          * Note that we have to do this after any RX resets
 2904          * have completed since the uplist register is cleared
 2905          * by a reset.
 2906          */
 2907         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 2908         xl_wait(sc);
 2909         CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
 2910         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 2911         xl_wait(sc);
 2912 
 2913         if (sc->xl_type == XL_TYPE_905B) {
 2914                 /* Set polling interval */
 2915                 CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
 2916                 /* Load the address of the TX list */
 2917                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
 2918                 xl_wait(sc);
 2919                 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2920                     sc->xl_cdata.xl_tx_chain[0].xl_phys);
 2921                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2922                 xl_wait(sc);
 2923         }
 2924 
 2925         /*
 2926          * If the coax transceiver is on, make sure to enable
 2927          * the DC-DC converter.
 2928          */
 2929         XL_SEL_WIN(3);
 2930         if (sc->xl_xcvr == XL_XCVR_COAX)
 2931                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
 2932         else
 2933                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
 2934 
 2935         /*
 2936          * Increase packet size to allow reception of 802.1q or ISL packets.
 2937          * For the 3c90x chip, set the 'allow large packets' bit in the MAC
 2938          * control register. For 3c90xB/C chips, use the RX packet size
 2939          * register.
 2940          */
 2941 
 2942         if (sc->xl_type == XL_TYPE_905B)
 2943                 CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
 2944         else {
 2945                 u_int8_t macctl;
 2946                 macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
 2947                 macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
 2948                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
 2949         }
 2950 
 2951         /* Clear out the stats counters. */
 2952         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
 2953         sc->xl_stats_no_timeout = 1;
 2954         xl_stats_update_locked(sc);
 2955         sc->xl_stats_no_timeout = 0;
 2956         XL_SEL_WIN(4);
 2957         CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
 2958         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
 2959 
 2960         /*
 2961          * Enable interrupts.
 2962          */
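              /*
               * XL_CMD_STAT_ENB selects which events are latched in the
               * status register, while XL_CMD_INTR_ENB selects which of
               * those latched events actually raise an interrupt; the
               * polling path below masks only the latter, so status bits
               * keep latching for xl_poll().
               */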
 2963         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
 2964         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
 2965 #ifdef DEVICE_POLLING
 2966         /* Disable interrupts if we are polling. */
 2967         if (ifp->if_capenable & IFCAP_POLLING)
 2968                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 2969         else
 2970 #endif
 2971         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
 2972         if (sc->xl_flags & XL_FLAG_FUNCREG)
 2973             bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
 2974 
 2975         /* Set the RX early threshold */
 2976         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
 2977         CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
 2978 
 2979         /* Enable receiver and transmitter. */
 2980         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2981         xl_wait(sc);
 2982         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
 2983         xl_wait(sc);
 2984 
 2985         /* XXX Downcall to miibus. */
 2986         if (mii != NULL)
 2987                 mii_mediachg(mii);
 2988 
 2989         /* Select window 7 for normal operations. */
 2990         XL_SEL_WIN(7);
 2991 
 2992         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2993         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2994 
 2995         sc->xl_wdog_timer = 0;
 2996         callout_reset(&sc->xl_stat_callout, hz, xl_stats_update, sc);
 2997 }
 2998 
 2999 /*
 3000  * Set media options.
 3001  */
 3002 static int
 3003 xl_ifmedia_upd(struct ifnet *ifp)
 3004 {
 3005         struct xl_softc         *sc = ifp->if_softc;
 3006         struct ifmedia          *ifm = NULL;
 3007         struct mii_data         *mii = NULL;
 3008 
 3009         XL_LOCK(sc);
 3010 
 3011         if (sc->xl_miibus != NULL)
 3012                 mii = device_get_softc(sc->xl_miibus);
 3013         if (mii == NULL)
 3014                 ifm = &sc->ifmedia;
 3015         else
 3016                 ifm = &mii->mii_media;
 3017 
 3018         switch (IFM_SUBTYPE(ifm->ifm_media)) {
 3019         case IFM_100_FX:
 3020         case IFM_10_FL:
 3021         case IFM_10_2:
 3022         case IFM_10_5:
 3023                 xl_setmode(sc, ifm->ifm_media);
 3024                 XL_UNLOCK(sc);
 3025                 return (0);
 3026         }
 3027 
 3028         if (sc->xl_media & XL_MEDIAOPT_MII ||
 3029             sc->xl_media & XL_MEDIAOPT_BTX ||
 3030             sc->xl_media & XL_MEDIAOPT_BT4) {
 3031                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3032                 xl_init_locked(sc);
 3033         } else {
 3034                 xl_setmode(sc, ifm->ifm_media);
 3035         }
 3036 
 3037         XL_UNLOCK(sc);
 3038 
 3039         return (0);
 3040 }
 3041 
 3042 /*
 3043  * Report current media status.
 3044  */
 3045 static void
 3046 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 3047 {
 3048         struct xl_softc         *sc = ifp->if_softc;
 3049         u_int32_t               icfg;
 3050         u_int16_t               status = 0;
 3051         struct mii_data         *mii = NULL;
 3052 
 3053         XL_LOCK(sc);
 3054 
 3055         if (sc->xl_miibus != NULL)
 3056                 mii = device_get_softc(sc->xl_miibus);
 3057 
 3058         XL_SEL_WIN(4);
 3059         status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
 3060 
 3061         XL_SEL_WIN(3);
 3062         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
 3063         icfg >>= XL_ICFG_CONNECTOR_BITS;
 3064 
 3065         ifmr->ifm_active = IFM_ETHER;
 3066         ifmr->ifm_status = IFM_AVALID;
 3067 
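              /*
               * XL_MEDIASTAT_CARRIER is set when carrier has been lost,
               * so the link is reported active only when the bit is clear.
               */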
 3068         if ((status & XL_MEDIASTAT_CARRIER) == 0)
 3069                 ifmr->ifm_status |= IFM_ACTIVE;
 3070 
 3071         switch (icfg) {
 3072         case XL_XCVR_10BT:
 3073                 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
 3074                 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
 3075                         ifmr->ifm_active |= IFM_FDX;
 3076                 else
 3077                         ifmr->ifm_active |= IFM_HDX;
 3078                 break;
 3079         case XL_XCVR_AUI:
 3080                 if (sc->xl_type == XL_TYPE_905B &&
 3081                     sc->xl_media == XL_MEDIAOPT_10FL) {
 3082                         ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
 3083                         if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
 3084                                 ifmr->ifm_active |= IFM_FDX;
 3085                         else
 3086                                 ifmr->ifm_active |= IFM_HDX;
 3087                 } else
 3088                         ifmr->ifm_active = IFM_ETHER|IFM_10_5;
 3089                 break;
 3090         case XL_XCVR_COAX:
 3091                 ifmr->ifm_active = IFM_ETHER|IFM_10_2;
 3092                 break;
 3093         /*
 3094          * XXX MII and BTX/AUTO should be separate cases.
 3095          */
 3096 
 3097         case XL_XCVR_100BTX:
 3098         case XL_XCVR_AUTO:
 3099         case XL_XCVR_MII:
 3100                 if (mii != NULL) {
 3101                         mii_pollstat(mii);
 3102                         ifmr->ifm_active = mii->mii_media_active;
 3103                         ifmr->ifm_status = mii->mii_media_status;
 3104                 }
 3105                 break;
 3106         case XL_XCVR_100BFX:
 3107                 ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
 3108                 break;
 3109         default:
 3110                 if_printf(ifp, "unknown XCVR type: %d\n", icfg);
 3111                 break;
 3112         }
 3113 
 3114         XL_UNLOCK(sc);
 3115 }
 3116 
 3117 static int
 3118 xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 3119 {
 3120         struct xl_softc         *sc = ifp->if_softc;
 3121         struct ifreq            *ifr = (struct ifreq *) data;
 3122         int                     error = 0, mask;
 3123         struct mii_data         *mii = NULL;
 3124         u_int8_t                rxfilt;
 3125 
 3126         switch (command) {
 3127         case SIOCSIFFLAGS:
 3128                 XL_LOCK(sc);
 3129 
 3130                 XL_SEL_WIN(5);
 3131                 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
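                      /*
                       * If only the PROMISC flag changed while the interface
                       * is running, just rewrite the RX filter; other IFF_UP
                       * transitions fall through to a full reinitialization.
                       */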
 3132                 if (ifp->if_flags & IFF_UP) {
 3133                         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3134                             ifp->if_flags & IFF_PROMISC &&
 3135                             !(sc->xl_if_flags & IFF_PROMISC)) {
 3136                                 rxfilt |= XL_RXFILTER_ALLFRAMES;
 3137                                 CSR_WRITE_2(sc, XL_COMMAND,
 3138                                     XL_CMD_RX_SET_FILT|rxfilt);
 3139                                 XL_SEL_WIN(7);
 3140                         } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3141                             !(ifp->if_flags & IFF_PROMISC) &&
 3142                             sc->xl_if_flags & IFF_PROMISC) {
 3143                                 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
 3144                                 CSR_WRITE_2(sc, XL_COMMAND,
 3145                                     XL_CMD_RX_SET_FILT|rxfilt);
 3146                                 XL_SEL_WIN(7);
 3147                         } else
 3148                                 xl_init_locked(sc);
 3149                 } else {
 3150                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3151                                 xl_stop(sc);
 3152                 }
 3153                 sc->xl_if_flags = ifp->if_flags;
 3154                 XL_UNLOCK(sc);
 3155                 error = 0;
 3156                 break;
 3157         case SIOCADDMULTI:
 3158         case SIOCDELMULTI:
 3159                 /* XXX Downcall from if_addmulti() possibly with locks held. */
 3160                 XL_LOCK(sc);
 3161                 if (sc->xl_type == XL_TYPE_905B)
 3162                         xl_setmulti_hash(sc);
 3163                 else
 3164                         xl_setmulti(sc);
 3165                 XL_UNLOCK(sc);
 3166                 error = 0;
 3167                 break;
 3168         case SIOCGIFMEDIA:
 3169         case SIOCSIFMEDIA:
 3170                 if (sc->xl_miibus != NULL)
 3171                         mii = device_get_softc(sc->xl_miibus);
 3172                 if (mii == NULL)
 3173                         error = ifmedia_ioctl(ifp, ifr,
 3174                             &sc->ifmedia, command);
 3175                 else
 3176                         error = ifmedia_ioctl(ifp, ifr,
 3177                             &mii->mii_media, command);
 3178                 break;
 3179         case SIOCSIFCAP:
 3180                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 3181 #ifdef DEVICE_POLLING
 3182                 if ((mask & IFCAP_POLLING) != 0 &&
 3183                     (ifp->if_capabilities & IFCAP_POLLING) != 0) {
 3184                         ifp->if_capenable ^= IFCAP_POLLING;
 3185                         if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
 3186                                 error = ether_poll_register(xl_poll, ifp);
 3187                                 if (error)
 3188                                         break;
 3189                                 XL_LOCK(sc);
 3190                                 /* Disable interrupts */
 3191                                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 3192                                 ifp->if_capenable |= IFCAP_POLLING;
 3193                                 XL_UNLOCK(sc);
 3194                         } else {
 3195                                 error = ether_poll_deregister(ifp);
 3196                                 /* Enable interrupts. */
 3197                                 XL_LOCK(sc);
 3198                                 CSR_WRITE_2(sc, XL_COMMAND,
 3199                                     XL_CMD_INTR_ACK | 0xFF);
 3200                                 CSR_WRITE_2(sc, XL_COMMAND,
 3201                                     XL_CMD_INTR_ENB | XL_INTRS);
 3202                                 if (sc->xl_flags & XL_FLAG_FUNCREG)
 3203                                         bus_space_write_4(sc->xl_ftag,
 3204                                             sc->xl_fhandle, 4, 0x8000);
 3205                                 XL_UNLOCK(sc);
 3206                         }
 3207                 }
 3208 #endif /* DEVICE_POLLING */
 3209                 XL_LOCK(sc);
 3210                 if ((mask & IFCAP_TXCSUM) != 0 &&
 3211                     (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
 3212                         ifp->if_capenable ^= IFCAP_TXCSUM;
 3213                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 3214                                 ifp->if_hwassist |= XL905B_CSUM_FEATURES;
 3215                         else
 3216                                 ifp->if_hwassist &= ~XL905B_CSUM_FEATURES;
 3217                 }
 3218                 if ((mask & IFCAP_RXCSUM) != 0 &&
 3219                     (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
 3220                         ifp->if_capenable ^= IFCAP_RXCSUM;
 3221                 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 3222                     (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
 3223                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 3224                 XL_UNLOCK(sc);
 3225                 break;
 3226         default:
 3227                 error = ether_ioctl(ifp, command, data);
 3228                 break;
 3229         }
 3230 
 3231         return (error);
 3232 }
 3233 
 3234 static int
 3235 xl_watchdog(struct xl_softc *sc)
 3236 {
 3237         struct ifnet            *ifp = sc->xl_ifp;
 3238         u_int16_t               status = 0;
 3239         int                     misintr;
 3240 
 3241         XL_LOCK_ASSERT(sc);
 3242 
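              /*
               * The watchdog counter is armed (set to 5) by the start
               * routines and zero means it is disarmed; only an armed
               * counter that counts down to zero here is treated as a
               * transmit timeout.
               */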
 3243         if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0)
 3244                 return (0);
 3245 
 3246         xl_rxeof(sc);
 3247         xl_txeoc(sc);
 3248         misintr = 0;
 3249         if (sc->xl_type == XL_TYPE_905B) {
 3250                 xl_txeof_90xB(sc);
 3251                 if (sc->xl_cdata.xl_tx_cnt == 0)
 3252                         misintr++;
 3253         } else {
 3254                 xl_txeof(sc);
 3255                 if (sc->xl_cdata.xl_tx_head == NULL)
 3256                         misintr++;
 3257         }
 3258         if (misintr != 0) {
 3259                 device_printf(sc->xl_dev,
 3260                     "watchdog timeout (missed Tx interrupts) -- recovering\n");
 3261                 return (0);
 3262         }
 3263 
 3264         ifp->if_oerrors++;
 3265         XL_SEL_WIN(4);
 3266         status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
 3267         device_printf(sc->xl_dev, "watchdog timeout\n");
 3268 
 3269         if (status & XL_MEDIASTAT_CARRIER)
 3270                 device_printf(sc->xl_dev,
 3271                     "no carrier - transceiver cable problem?\n");
 3272 
 3273         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3274         xl_init_locked(sc);
 3275 
 3276         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 3277                 if (sc->xl_type == XL_TYPE_905B)
 3278                         xl_start_90xB_locked(ifp);
 3279                 else
 3280                         xl_start_locked(ifp);
 3281         }
 3282 
 3283         return (EJUSTRETURN);
 3284 }
 3285 
 3286 /*
 3287  * Stop the adapter and free any mbufs allocated to the
 3288  * RX and TX lists.
 3289  */
 3290 static void
 3291 xl_stop(struct xl_softc *sc)
 3292 {
 3293         register int            i;
 3294         struct ifnet            *ifp = sc->xl_ifp;
 3295 
 3296         XL_LOCK_ASSERT(sc);
 3297 
 3298         sc->xl_wdog_timer = 0;
 3299 
 3300         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
 3301         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
 3302         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
 3303         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
 3304         xl_wait(sc);
 3305         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
 3306         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
 3307         DELAY(800);
 3308 
 3309 #ifdef foo
 3310         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 3311         xl_wait(sc);
 3312         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 3313         xl_wait(sc);
 3314 #endif
 3315 
 3316         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
 3317         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
 3318         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 3319         if (sc->xl_flags & XL_FLAG_FUNCREG)
 3320                 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
 3321 
 3322         /* Stop the stats updater. */
 3323         callout_stop(&sc->xl_stat_callout);
 3324 
 3325         /*
 3326          * Free data in the RX lists.
 3327          */
 3328         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 3329                 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
 3330                         bus_dmamap_unload(sc->xl_mtag,
 3331                             sc->xl_cdata.xl_rx_chain[i].xl_map);
 3332                         bus_dmamap_destroy(sc->xl_mtag,
 3333                             sc->xl_cdata.xl_rx_chain[i].xl_map);
 3334                         m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
 3335                         sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
 3336                 }
 3337         }
 3338         if (sc->xl_ldata.xl_rx_list != NULL)
 3339                 bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
 3340         /*
 3341          * Free the TX list buffers.
 3342          */
 3343         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 3344                 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
 3345                         bus_dmamap_unload(sc->xl_mtag,
 3346                             sc->xl_cdata.xl_tx_chain[i].xl_map);
 3347                         bus_dmamap_destroy(sc->xl_mtag,
 3348                             sc->xl_cdata.xl_tx_chain[i].xl_map);
 3349                         m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
 3350                         sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
 3351                 }
 3352         }
 3353         if (sc->xl_ldata.xl_tx_list != NULL)
 3354                 bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);
 3355 
 3356         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 3357 }
 3358 
 3359 /*
 3360  * Stop all chip I/O so that the kernel's probe routines don't
 3361  * get confused by errant DMAs when rebooting.
 3362  */
 3363 static int
 3364 xl_shutdown(device_t dev)
 3365 {
 3366 
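              /*
               * Defer to the suspend path, which stops the chip and arms
               * wake-on-LAN via xl_setwol().
               */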
 3367         return (xl_suspend(dev));
 3368 }
 3369 
 3370 static int
 3371 xl_suspend(device_t dev)
 3372 {
 3373         struct xl_softc         *sc;
 3374 
 3375         sc = device_get_softc(dev);
 3376 
 3377         XL_LOCK(sc);
 3378         xl_stop(sc);
 3379         xl_setwol(sc);
 3380         XL_UNLOCK(sc);
 3381 
 3382         return (0);
 3383 }
 3384 
 3385 static int
 3386 xl_resume(device_t dev)
 3387 {
 3388         struct xl_softc         *sc;
 3389         struct ifnet            *ifp;
 3390 
 3391         sc = device_get_softc(dev);
 3392         ifp = sc->xl_ifp;
 3393 
 3394         XL_LOCK(sc);
 3395 
 3396         if (ifp->if_flags & IFF_UP) {
 3397                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3398                 xl_init_locked(sc);
 3399         }
 3400 
 3401         XL_UNLOCK(sc);
 3402 
 3403         return (0);
 3404 }
 3405 
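      /*
       * Program the adapter for wake-on-LAN: clear any latched PME status,
       * request a magic-packet wakeup when that capability is enabled,
       * re-enable the receiver so the magic packet can be seen, and set or
       * clear PME_EN in the PCI power-management status register to match.
       */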
 3406 static void
 3407 xl_setwol(struct xl_softc *sc)
 3408 {
 3409         struct ifnet            *ifp;
 3410         u_int16_t               cfg, pmstat;
 3411 
 3412         if ((sc->xl_flags & XL_FLAG_WOL) == 0)
 3413                 return;
 3414 
 3415         ifp = sc->xl_ifp;
 3416         XL_SEL_WIN(7);
 3417         /* Clear any pending PME events. */
 3418         CSR_READ_2(sc, XL_W7_BM_PME);
 3419         cfg = 0;
 3420         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 3421                 cfg |= XL_BM_PME_MAGIC;
 3422         CSR_WRITE_2(sc, XL_W7_BM_PME, cfg);
 3423         /* Enable RX. */
 3424         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 3425                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
 3426         /* Request PME. */
 3427         pmstat = pci_read_config(sc->xl_dev,
 3428             sc->xl_pmcap + PCIR_POWER_STATUS, 2);
 3429         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
 3430                 pmstat |= PCIM_PSTAT_PMEENABLE;
 3431         else
 3432                 pmstat &= ~PCIM_PSTAT_PMEENABLE;
 3433         pci_write_config(sc->xl_dev,
 3434             sc->xl_pmcap + PCIR_POWER_STATUS, pmstat, 2);
 3435 }
