FreeBSD/Linux Kernel Cross Reference
sys/pci/if_xl.c


    1 /*-
    2  * Copyright (c) 1997, 1998, 1999
    3  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Bill Paul.
   16  * 4. Neither the name of the author nor the names of any co-contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   30  * THE POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD$");
   35 
   36 /*
   37  * 3Com 3c90x Etherlink XL PCI NIC driver
   38  *
   39  * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
   40  * bus-master chips (3c90x cards and embedded controllers) including
   41  * the following:
   42  *
   43  * 3Com 3c900-TPO       10Mbps/RJ-45
   44  * 3Com 3c900-COMBO     10Mbps/RJ-45,AUI,BNC
   45  * 3Com 3c905-TX        10/100Mbps/RJ-45
   46  * 3Com 3c905-T4        10/100Mbps/RJ-45
   47  * 3Com 3c900B-TPO      10Mbps/RJ-45
   48  * 3Com 3c900B-COMBO    10Mbps/RJ-45,AUI,BNC
   49  * 3Com 3c900B-TPC      10Mbps/RJ-45,BNC
   50  * 3Com 3c900B-FL       10Mbps/Fiber-optic
   51  * 3Com 3c905B-COMBO    10/100Mbps/RJ-45,AUI,BNC
   52  * 3Com 3c905B-TX       10/100Mbps/RJ-45
   53  * 3Com 3c905B-FL/FX    10/100Mbps/Fiber-optic
   54  * 3Com 3c905C-TX       10/100Mbps/RJ-45 (Tornado ASIC)
   55  * 3Com 3c980-TX        10/100Mbps server adapter (Hurricane ASIC)
   56  * 3Com 3c980C-TX       10/100Mbps server adapter (Tornado ASIC)
   57  * 3Com 3cSOHO100-TX    10/100Mbps/RJ-45 (Hurricane ASIC)
   58  * 3Com 3c450-TX        10/100Mbps/RJ-45 (Tornado ASIC)
   59  * 3Com 3c555           10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
   60  * 3Com 3c556           10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
   61  * 3Com 3c556B          10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
   62  * 3Com 3c575TX         10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   63  * 3Com 3c575B          10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   64  * 3Com 3c575C          10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   65  * 3Com 3cxfem656       10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   66  * 3Com 3cxfem656b      10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   67  * 3Com 3cxfem656c      10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
   68  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
   69  * Dell on-board 3c920 10/100Mbps/RJ-45
   70  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
   71  * Dell Latitude laptop docking station embedded 3c905-TX
   72  *
   73  * Written by Bill Paul <wpaul@ctr.columbia.edu>
   74  * Electrical Engineering Department
   75  * Columbia University, New York City
   76  */
   77 /*
    78  * The 3c90x series chips use a bus-master DMA interface for transferring
    79  * packets to and from the controller chip. Some of the "vortex" cards
    80  * (3c59x) also supported a bus master mode; however, for those chips
   81  * you could only DMA packets to/from a contiguous memory buffer. For
   82  * transmission this would mean copying the contents of the queued mbuf
   83  * chain into an mbuf cluster and then DMAing the cluster. This extra
   84  * copy would sort of defeat the purpose of the bus master support for
   85  * any packet that doesn't fit into a single mbuf.
   86  *
   87  * By contrast, the 3c90x cards support a fragment-based bus master
   88  * mode where mbuf chains can be encapsulated using TX descriptors.
   89  * This is similar to other PCI chips such as the Texas Instruments
   90  * ThunderLAN and the Intel 82557/82558.
   91  *
   92  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
   93  * bus master chips because they maintain the old PIO interface for
   94  * backwards compatibility, but starting with the 3c905B and the
   95  * "cyclone" chips, the compatibility interface has been dropped.
    96  * Since using bus master DMA is a big win, we use this driver to
    97  * support the PCI "boomerang" chips in order to obtain better
    98  * performance, even though they also work with the "vortex" driver.
   99  *
  100  * This driver is in the /sys/pci directory because it only supports
  101  * PCI-based NICs.
  102  */
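
       /*
        * Illustrative sketch (not part of the driver): a fragment-based
        * TX descriptor carries a link to the next descriptor, a status
        * word and a short array of (bus address, length) pairs, one
        * entry per mbuf in the outgoing chain.  The names below are
        * made up for illustration only; the real structures are defined
        * in if_xlreg.h and are filled in by xl_encap() later in this
        * file.
        */
       #ifdef notdef
       struct example_frag {
               u_int32_t       frag_addr;      /* bus address of mbuf data */
               u_int32_t       frag_len;       /* fragment length (last one flagged) */
       };
       struct example_desc {
               u_int32_t       desc_next;      /* bus address of next descriptor */
               u_int32_t       desc_status;    /* completion/error bits */
               struct example_frag desc_frag[8]; /* one entry per mbuf fragment */
       };
       #endif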
  103 
  104 #ifdef HAVE_KERNEL_OPTION_HEADERS
  105 #include "opt_device_polling.h"
  106 #endif
  107 
  108 #include <sys/param.h>
  109 #include <sys/systm.h>
  110 #include <sys/sockio.h>
  111 #include <sys/endian.h>
  112 #include <sys/mbuf.h>
  113 #include <sys/kernel.h>
  114 #include <sys/module.h>
  115 #include <sys/socket.h>
  116 #include <sys/taskqueue.h>
  117 
  118 #include <net/if.h>
  119 #include <net/if_arp.h>
  120 #include <net/ethernet.h>
  121 #include <net/if_dl.h>
  122 #include <net/if_media.h>
  123 #include <net/if_types.h>
  124 
  125 #include <net/bpf.h>
  126 
  127 #include <machine/bus.h>
  128 #include <machine/resource.h>
  129 #include <sys/bus.h>
  130 #include <sys/rman.h>
  131 
  132 #include <dev/mii/mii.h>
  133 #include <dev/mii/miivar.h>
  134 
  135 #include <dev/pci/pcireg.h>
  136 #include <dev/pci/pcivar.h>
  137 
  138 MODULE_DEPEND(xl, pci, 1, 1, 1);
  139 MODULE_DEPEND(xl, ether, 1, 1, 1);
  140 MODULE_DEPEND(xl, miibus, 1, 1, 1);
  141 
  142 /* "device miibus" required.  See GENERIC if you get errors here. */
  143 #include "miibus_if.h"
  144 
  145 #include <pci/if_xlreg.h>
  146 
  147 /*
  148  * TX Checksumming is disabled by default for two reasons:
  149  * - TX Checksumming will occasionally produce corrupt packets
  150  * - TX Checksumming seems to reduce performance
  151  *
   152  * Only 905B/C cards were reported to have this problem; it is possible
   153  * that later chips _may_ be immune.
  154  */
  155 #define XL905B_TXCSUM_BROKEN    1
  156 
  157 #ifdef XL905B_TXCSUM_BROKEN
  158 #define XL905B_CSUM_FEATURES    0
  159 #else
  160 #define XL905B_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
  161 #endif
  162 
  163 /*
  164  * Various supported device vendors/types and their names.
  165  */
  166 static const struct xl_type xl_devs[] = {
  167         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
  168                 "3Com 3c900-TPO Etherlink XL" },
  169         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
  170                 "3Com 3c900-COMBO Etherlink XL" },
  171         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
  172                 "3Com 3c905-TX Fast Etherlink XL" },
  173         { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
  174                 "3Com 3c905-T4 Fast Etherlink XL" },
  175         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
  176                 "3Com 3c900B-TPO Etherlink XL" },
  177         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
  178                 "3Com 3c900B-COMBO Etherlink XL" },
  179         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
  180                 "3Com 3c900B-TPC Etherlink XL" },
  181         { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
  182                 "3Com 3c900B-FL Etherlink XL" },
  183         { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
  184                 "3Com 3c905B-TX Fast Etherlink XL" },
  185         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
  186                 "3Com 3c905B-T4 Fast Etherlink XL" },
  187         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
  188                 "3Com 3c905B-FX/SC Fast Etherlink XL" },
  189         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
  190                 "3Com 3c905B-COMBO Fast Etherlink XL" },
  191         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
  192                 "3Com 3c905C-TX Fast Etherlink XL" },
  193         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
  194                 "3Com 3c920B-EMB Integrated Fast Etherlink XL" },
  195         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM,
  196                 "3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" },
  197         { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
  198                 "3Com 3c980 Fast Etherlink XL" },
  199         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
  200                 "3Com 3c980C Fast Etherlink XL" },
  201         { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
  202                 "3Com 3cSOHO100-TX OfficeConnect" },
  203         { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
  204                 "3Com 3c450-TX HomeConnect" },
  205         { TC_VENDORID, TC_DEVICEID_HURRICANE_555,
  206                 "3Com 3c555 Fast Etherlink XL" },
  207         { TC_VENDORID, TC_DEVICEID_HURRICANE_556,
  208                 "3Com 3c556 Fast Etherlink XL" },
  209         { TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
  210                 "3Com 3c556B Fast Etherlink XL" },
  211         { TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
  212                 "3Com 3c575TX Fast Etherlink XL" },
  213         { TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
  214                 "3Com 3c575B Fast Etherlink XL" },
  215         { TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
  216                 "3Com 3c575C Fast Etherlink XL" },
  217         { TC_VENDORID, TC_DEVICEID_HURRICANE_656,
  218                 "3Com 3c656 Fast Etherlink XL" },
  219         { TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
  220                 "3Com 3c656B Fast Etherlink XL" },
  221         { TC_VENDORID, TC_DEVICEID_TORNADO_656C,
  222                 "3Com 3c656C Fast Etherlink XL" },
  223         { 0, 0, NULL }
  224 };
  225 
  226 static int xl_probe(device_t);
  227 static int xl_attach(device_t);
  228 static int xl_detach(device_t);
  229 
  230 static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
  231 static void xl_stats_update(void *);
  232 static void xl_stats_update_locked(struct xl_softc *);
  233 static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf **);
  234 static void xl_rxeof(struct xl_softc *);
  235 static void xl_rxeof_task(void *, int);
  236 static int xl_rx_resync(struct xl_softc *);
  237 static void xl_txeof(struct xl_softc *);
  238 static void xl_txeof_90xB(struct xl_softc *);
  239 static void xl_txeoc(struct xl_softc *);
  240 static void xl_intr(void *);
  241 static void xl_start(struct ifnet *);
  242 static void xl_start_locked(struct ifnet *);
  243 static void xl_start_90xB_locked(struct ifnet *);
  244 static int xl_ioctl(struct ifnet *, u_long, caddr_t);
  245 static void xl_init(void *);
  246 static void xl_init_locked(struct xl_softc *);
  247 static void xl_stop(struct xl_softc *);
  248 static int xl_watchdog(struct xl_softc *);
  249 static int xl_shutdown(device_t);
  250 static int xl_suspend(device_t);
  251 static int xl_resume(device_t);
  252 
  253 #ifdef DEVICE_POLLING
  254 static void xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
  255 static void xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
  256 #endif
  257 
  258 static int xl_ifmedia_upd(struct ifnet *);
  259 static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  260 
  261 static int xl_eeprom_wait(struct xl_softc *);
  262 static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
  263 static void xl_mii_sync(struct xl_softc *);
  264 static void xl_mii_send(struct xl_softc *, u_int32_t, int);
  265 static int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
  266 static int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
  267 
  268 static void xl_setcfg(struct xl_softc *);
  269 static void xl_setmode(struct xl_softc *, int);
  270 static void xl_setmulti(struct xl_softc *);
  271 static void xl_setmulti_hash(struct xl_softc *);
  272 static void xl_reset(struct xl_softc *);
  273 static int xl_list_rx_init(struct xl_softc *);
  274 static int xl_list_tx_init(struct xl_softc *);
  275 static int xl_list_tx_init_90xB(struct xl_softc *);
  276 static void xl_wait(struct xl_softc *);
  277 static void xl_mediacheck(struct xl_softc *);
  278 static void xl_choose_media(struct xl_softc *sc, int *media);
  279 static void xl_choose_xcvr(struct xl_softc *, int);
  280 static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int);
  281 #ifdef notdef
  282 static void xl_testpacket(struct xl_softc *);
  283 #endif
  284 
  285 static int xl_miibus_readreg(device_t, int, int);
  286 static int xl_miibus_writereg(device_t, int, int, int);
  287 static void xl_miibus_statchg(device_t);
  288 static void xl_miibus_mediainit(device_t);
  289 
  290 static device_method_t xl_methods[] = {
  291         /* Device interface */
  292         DEVMETHOD(device_probe,         xl_probe),
  293         DEVMETHOD(device_attach,        xl_attach),
  294         DEVMETHOD(device_detach,        xl_detach),
  295         DEVMETHOD(device_shutdown,      xl_shutdown),
  296         DEVMETHOD(device_suspend,       xl_suspend),
  297         DEVMETHOD(device_resume,        xl_resume),
  298 
  299         /* bus interface */
  300         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  301         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  302 
  303         /* MII interface */
  304         DEVMETHOD(miibus_readreg,       xl_miibus_readreg),
  305         DEVMETHOD(miibus_writereg,      xl_miibus_writereg),
  306         DEVMETHOD(miibus_statchg,       xl_miibus_statchg),
  307         DEVMETHOD(miibus_mediainit,     xl_miibus_mediainit),
  308 
  309         { 0, 0 }
  310 };
  311 
  312 static driver_t xl_driver = {
  313         "xl",
  314         xl_methods,
  315         sizeof(struct xl_softc)
  316 };
  317 
  318 static devclass_t xl_devclass;
  319 
  320 DRIVER_MODULE(xl, cardbus, xl_driver, xl_devclass, 0, 0);
  321 DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0);
  322 DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
  323 
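       /*
        * Callback passed to bus_dmamap_load().  It simply records the bus
        * address of the (single) DMA segment so that the caller can later
        * program it into the chip.
        */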
  324 static void
  325 xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  326 {
  327         u_int32_t *paddr;
  328 
  329         paddr = arg;
  330         *paddr = segs->ds_addr;
  331 }
  332 
  333 /*
  334  * Murphy's law says that it's possible the chip can wedge and
  335  * the 'command in progress' bit may never clear. Hence, we wait
  336  * only a finite amount of time to avoid getting caught in an
  337  * infinite loop. Normally this delay routine would be a macro,
  338  * but it isn't called during normal operation so we can afford
  339  * to make it a function.
  340  */
  341 static void
  342 xl_wait(struct xl_softc *sc)
  343 {
  344         register int            i;
  345 
  346         for (i = 0; i < XL_TIMEOUT; i++) {
  347                 if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0)
  348                         break;
  349         }
  350 
  351         if (i == XL_TIMEOUT)
  352                 device_printf(sc->xl_dev, "command never completed!\n");
  353 }
  354 
  355 /*
  356  * MII access routines are provided for adapters with external
  357  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
  358  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
  359  * Note: if you don't perform the MDIO operations just right,
  360  * it's possible to end up with code that works correctly with
  361  * some chips/CPUs/processor speeds/bus speeds/etc but not
  362  * with others.
  363  */
  364 #define MII_SET(x)                                      \
  365         CSR_WRITE_2(sc, XL_W4_PHY_MGMT,                 \
  366                 CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
  367 
  368 #define MII_CLR(x)                                      \
  369         CSR_WRITE_2(sc, XL_W4_PHY_MGMT,                 \
  370                 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
  371 
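       /*
        * For reference: the management frame bit-banged by the routines
        * below is (roughly) the standard IEEE 802.3 MDIO frame: a 32-bit
        * preamble of ones (xl_mii_sync()), a 2-bit start delimiter, a
        * 2-bit opcode (read or write), a 5-bit PHY address, a 5-bit
        * register address, a 2-bit turnaround and 16 data bits.
        */
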
  372 /*
  373  * Sync the PHYs by setting data bit and strobing the clock 32 times.
  374  */
  375 static void
  376 xl_mii_sync(struct xl_softc *sc)
  377 {
  378         register int            i;
  379 
  380         XL_SEL_WIN(4);
  381         MII_SET(XL_MII_DIR|XL_MII_DATA);
  382 
  383         for (i = 0; i < 32; i++) {
  384                 MII_SET(XL_MII_CLK);
  385                 MII_SET(XL_MII_DATA);
  386                 MII_SET(XL_MII_DATA);
  387                 MII_CLR(XL_MII_CLK);
  388                 MII_SET(XL_MII_DATA);
  389                 MII_SET(XL_MII_DATA);
  390         }
  391 }
  392 
  393 /*
  394  * Clock a series of bits through the MII.
  395  */
  396 static void
  397 xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
  398 {
  399         int                     i;
  400 
  401         XL_SEL_WIN(4);
  402         MII_CLR(XL_MII_CLK);
  403 
  404         for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
  405                 if (bits & i) {
  406                         MII_SET(XL_MII_DATA);
  407                 } else {
  408                         MII_CLR(XL_MII_DATA);
  409                 }
  410                 MII_CLR(XL_MII_CLK);
  411                 MII_SET(XL_MII_CLK);
  412         }
  413 }
  414 
  415 /*
   416  * Read a PHY register through the MII.
  417  */
  418 static int
  419 xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
  420 {
  421         int                     i, ack;
  422 
  423         /* Set up frame for RX. */
  424         frame->mii_stdelim = XL_MII_STARTDELIM;
  425         frame->mii_opcode = XL_MII_READOP;
  426         frame->mii_turnaround = 0;
  427         frame->mii_data = 0;
  428 
  429         /* Select register window 4. */
  430         XL_SEL_WIN(4);
  431 
  432         CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
  433         /* Turn on data xmit. */
  434         MII_SET(XL_MII_DIR);
  435 
  436         xl_mii_sync(sc);
  437 
  438         /* Send command/address info. */
  439         xl_mii_send(sc, frame->mii_stdelim, 2);
  440         xl_mii_send(sc, frame->mii_opcode, 2);
  441         xl_mii_send(sc, frame->mii_phyaddr, 5);
  442         xl_mii_send(sc, frame->mii_regaddr, 5);
  443 
  444         /* Idle bit */
  445         MII_CLR((XL_MII_CLK|XL_MII_DATA));
  446         MII_SET(XL_MII_CLK);
  447 
  448         /* Turn off xmit. */
  449         MII_CLR(XL_MII_DIR);
  450 
  451         /* Check for ack */
  452         MII_CLR(XL_MII_CLK);
  453         ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
  454         MII_SET(XL_MII_CLK);
  455 
  456         /*
  457          * Now try reading data bits. If the ack failed, we still
  458          * need to clock through 16 cycles to keep the PHY(s) in sync.
  459          */
  460         if (ack) {
  461                 for (i = 0; i < 16; i++) {
  462                         MII_CLR(XL_MII_CLK);
  463                         MII_SET(XL_MII_CLK);
  464                 }
  465                 goto fail;
  466         }
  467 
  468         for (i = 0x8000; i; i >>= 1) {
  469                 MII_CLR(XL_MII_CLK);
  470                 if (!ack) {
  471                         if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
  472                                 frame->mii_data |= i;
  473                 }
  474                 MII_SET(XL_MII_CLK);
  475         }
  476 
  477 fail:
  478         MII_CLR(XL_MII_CLK);
  479         MII_SET(XL_MII_CLK);
  480 
  481         return (ack ? 1 : 0);
  482 }
  483 
  484 /*
  485  * Write to a PHY register through the MII.
  486  */
  487 static int
  488 xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
  489 {
  490 
  491         /* Set up frame for TX. */
  492         frame->mii_stdelim = XL_MII_STARTDELIM;
  493         frame->mii_opcode = XL_MII_WRITEOP;
  494         frame->mii_turnaround = XL_MII_TURNAROUND;
  495 
   496         /* Select register window 4. */
  497         XL_SEL_WIN(4);
  498 
  499         /* Turn on data output. */
  500         MII_SET(XL_MII_DIR);
  501 
  502         xl_mii_sync(sc);
  503 
  504         xl_mii_send(sc, frame->mii_stdelim, 2);
  505         xl_mii_send(sc, frame->mii_opcode, 2);
  506         xl_mii_send(sc, frame->mii_phyaddr, 5);
  507         xl_mii_send(sc, frame->mii_regaddr, 5);
  508         xl_mii_send(sc, frame->mii_turnaround, 2);
  509         xl_mii_send(sc, frame->mii_data, 16);
  510 
  511         /* Idle bit. */
  512         MII_SET(XL_MII_CLK);
  513         MII_CLR(XL_MII_CLK);
  514 
  515         /* Turn off xmit. */
  516         MII_CLR(XL_MII_DIR);
  517 
  518         return (0);
  519 }
  520 
  521 static int
  522 xl_miibus_readreg(device_t dev, int phy, int reg)
  523 {
  524         struct xl_softc         *sc;
  525         struct xl_mii_frame     frame;
  526 
  527         sc = device_get_softc(dev);
  528 
  529         /*
  530          * Pretend that PHYs are only available at MII address 24.
  531          * This is to guard against problems with certain 3Com ASIC
  532          * revisions that incorrectly map the internal transceiver
  533          * control registers at all MII addresses. This can cause
  534          * the miibus code to attach the same PHY several times over.
  535          */
  536         if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24)
  537                 return (0);
  538 
  539         bzero((char *)&frame, sizeof(frame));
  540         frame.mii_phyaddr = phy;
  541         frame.mii_regaddr = reg;
  542 
  543         xl_mii_readreg(sc, &frame);
  544 
  545         return (frame.mii_data);
  546 }
  547 
  548 static int
  549 xl_miibus_writereg(device_t dev, int phy, int reg, int data)
  550 {
  551         struct xl_softc         *sc;
  552         struct xl_mii_frame     frame;
  553 
  554         sc = device_get_softc(dev);
  555 
  556         if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24)
  557                 return (0);
  558 
  559         bzero((char *)&frame, sizeof(frame));
  560         frame.mii_phyaddr = phy;
  561         frame.mii_regaddr = reg;
  562         frame.mii_data = data;
  563 
  564         xl_mii_writereg(sc, &frame);
  565 
  566         return (0);
  567 }
  568 
  569 static void
  570 xl_miibus_statchg(device_t dev)
  571 {
  572         struct xl_softc         *sc;
  573         struct mii_data         *mii;
  574 
  575         sc = device_get_softc(dev);
  576         mii = device_get_softc(sc->xl_miibus);
  577 
  578         xl_setcfg(sc);
  579 
  580         /* Set ASIC's duplex mode to match the PHY. */
  581         XL_SEL_WIN(3);
  582         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
  583                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
  584         else
  585                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
  586                     (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
  587 }
  588 
  589 /*
  590  * Special support for the 3c905B-COMBO. This card has 10/100 support
  591  * plus BNC and AUI ports. This means we will have both an miibus attached
  592  * plus some non-MII media settings. In order to allow this, we have to
  593  * add the extra media to the miibus's ifmedia struct, but we can't do
  594  * that during xl_attach() because the miibus hasn't been attached yet.
  595  * So instead, we wait until the miibus probe/attach is done, at which
   596  * point we will get a callback telling us that it's safe to add our
  597  * extra media.
  598  */
  599 static void
  600 xl_miibus_mediainit(device_t dev)
  601 {
  602         struct xl_softc         *sc;
  603         struct mii_data         *mii;
  604         struct ifmedia          *ifm;
  605 
  606         sc = device_get_softc(dev);
  607         mii = device_get_softc(sc->xl_miibus);
  608         ifm = &mii->mii_media;
  609 
  610         if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) {
  611                 /*
  612                  * Check for a 10baseFL board in disguise.
  613                  */
  614                 if (sc->xl_type == XL_TYPE_905B &&
  615                     sc->xl_media == XL_MEDIAOPT_10FL) {
  616                         if (bootverbose)
  617                                 device_printf(sc->xl_dev, "found 10baseFL\n");
  618                         ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL);
  619                         ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0,
  620                             NULL);
  621                         if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
  622                                 ifmedia_add(ifm,
  623                                     IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL);
  624                 } else {
  625                         if (bootverbose)
  626                                 device_printf(sc->xl_dev, "found AUI\n");
  627                         ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL);
  628                 }
  629         }
  630 
  631         if (sc->xl_media & XL_MEDIAOPT_BNC) {
  632                 if (bootverbose)
  633                         device_printf(sc->xl_dev, "found BNC\n");
  634                 ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL);
  635         }
  636 }
  637 
  638 /*
  639  * The EEPROM is slow: give it time to come ready after issuing
  640  * it a command.
  641  */
  642 static int
  643 xl_eeprom_wait(struct xl_softc *sc)
  644 {
  645         int                     i;
  646 
  647         for (i = 0; i < 100; i++) {
  648                 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
  649                         DELAY(162);
  650                 else
  651                         break;
  652         }
  653 
  654         if (i == 100) {
  655                 device_printf(sc->xl_dev, "eeprom failed to come ready\n");
  656                 return (1);
  657         }
  658 
  659         return (0);
  660 }
  661 
  662 /*
  663  * Read a sequence of words from the EEPROM. Note that ethernet address
  664  * data is stored in the EEPROM in network byte order.
  665  */
  666 static int
  667 xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
  668 {
  669         int                     err = 0, i;
  670         u_int16_t               word = 0, *ptr;
  671 
  672 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
  673 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
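               /*
                * A quick sketch of what EEPROM_5BIT_OFFSET() does: the low
                * 6 bits of the word offset stay in place and any higher
                * offset bits are shifted up by two, into bits 8-14 of the
                * command word.  For example, offset 0x40 encodes as 0x0100,
                * while offsets below 0x40 pass through unchanged.
                */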
  674         /*
  675          * XXX: WARNING! DANGER!
   676          * It's easy to accidentally overwrite the ROM content!
   677          * Note: the 3c575 uses 8-bit EEPROM offsets.
  678          */
  679         XL_SEL_WIN(0);
  680 
  681         if (xl_eeprom_wait(sc))
  682                 return (1);
  683 
  684         if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
  685                 off += 0x30;
  686 
  687         for (i = 0; i < cnt; i++) {
  688                 if (sc->xl_flags & XL_FLAG_8BITROM)
  689                         CSR_WRITE_2(sc, XL_W0_EE_CMD,
  690                             XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
  691                 else
  692                         CSR_WRITE_2(sc, XL_W0_EE_CMD,
  693                             XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
  694                 err = xl_eeprom_wait(sc);
  695                 if (err)
  696                         break;
  697                 word = CSR_READ_2(sc, XL_W0_EE_DATA);
  698                 ptr = (u_int16_t *)(dest + (i * 2));
  699                 if (swap)
  700                         *ptr = ntohs(word);
  701                 else
  702                         *ptr = word;
  703         }
  704 
  705         return (err ? 1 : 0);
  706 }
  707 
  708 /*
  709  * NICs older than the 3c905B have only one multicast option, which
  710  * is to enable reception of all multicast frames.
  711  */
  712 static void
  713 xl_setmulti(struct xl_softc *sc)
  714 {
  715         struct ifnet            *ifp = sc->xl_ifp;
  716         struct ifmultiaddr      *ifma;
  717         u_int8_t                rxfilt;
  718         int                     mcnt = 0;
  719 
  720         XL_LOCK_ASSERT(sc);
  721 
  722         XL_SEL_WIN(5);
  723         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
  724 
  725         if (ifp->if_flags & IFF_ALLMULTI) {
  726                 rxfilt |= XL_RXFILTER_ALLMULTI;
  727                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
  728                 return;
  729         }
  730 
  731         IF_ADDR_LOCK(ifp);
  732         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
  733                 mcnt++;
  734         IF_ADDR_UNLOCK(ifp);
  735 
  736         if (mcnt)
  737                 rxfilt |= XL_RXFILTER_ALLMULTI;
  738         else
  739                 rxfilt &= ~XL_RXFILTER_ALLMULTI;
  740 
  741         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
  742 }
  743 
  744 /*
  745  * 3c905B adapters have a hash filter that we can program.
  746  */
  747 static void
  748 xl_setmulti_hash(struct xl_softc *sc)
  749 {
  750         struct ifnet            *ifp = sc->xl_ifp;
  751         int                     h = 0, i;
  752         struct ifmultiaddr      *ifma;
  753         u_int8_t                rxfilt;
  754         int                     mcnt = 0;
  755 
  756         XL_LOCK_ASSERT(sc);
  757 
  758         XL_SEL_WIN(5);
  759         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
  760 
  761         if (ifp->if_flags & IFF_ALLMULTI) {
  762                 rxfilt |= XL_RXFILTER_ALLMULTI;
  763                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
  764                 return;
  765         } else
  766                 rxfilt &= ~XL_RXFILTER_ALLMULTI;
  767 
  768         /* first, zot all the existing hash bits */
  769         for (i = 0; i < XL_HASHFILT_SIZE; i++)
  770                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
  771 
  772         /* now program new ones */
  773         IF_ADDR_LOCK(ifp);
  774         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  775                 if (ifma->ifma_addr->sa_family != AF_LINK)
  776                         continue;
  777                 /*
  778                  * Note: the 3c905B currently only supports a 64-bit hash
  779                  * table, which means we really only need 6 bits, but the
  780                  * manual indicates that future chip revisions will have a
  781                  * 256-bit hash table, hence the routine is set up to
  782                  * calculate 8 bits of position info in case we need it some
  783                  * day.
  784                  * Note II, The Sequel: _CURRENT_ versions of the 3c905B have
  785                  * a 256 bit hash table. This means we have to use all 8 bits
  786                  * regardless. On older cards, the upper 2 bits will be
  787                  * ignored. Grrrr....
  788                  */
  789                 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  790                     ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
  791                 CSR_WRITE_2(sc, XL_COMMAND,
  792                     h | XL_CMD_RX_SET_HASH | XL_HASH_SET);
  793                 mcnt++;
  794         }
  795         IF_ADDR_UNLOCK(ifp);
  796 
  797         if (mcnt)
  798                 rxfilt |= XL_RXFILTER_MULTIHASH;
  799         else
  800                 rxfilt &= ~XL_RXFILTER_MULTIHASH;
  801 
  802         CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
  803 }
  804 
  805 #ifdef notdef
  806 static void
  807 xl_testpacket(struct xl_softc *sc)
  808 {
  809         struct mbuf             *m;
  810         struct ifnet            *ifp = sc->xl_ifp;
  811 
  812         MGETHDR(m, M_DONTWAIT, MT_DATA);
  813 
  814         if (m == NULL)
  815                 return;
  816 
  817         bcopy(&IFP2ENADDR(sc->xl_ifp),
  818                 mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
  819         bcopy(&IFP2ENADDR(sc->xl_ifp),
  820                 mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
  821         mtod(m, struct ether_header *)->ether_type = htons(3);
  822         mtod(m, unsigned char *)[14] = 0;
  823         mtod(m, unsigned char *)[15] = 0;
  824         mtod(m, unsigned char *)[16] = 0xE3;
  825         m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
  826         IFQ_ENQUEUE(&ifp->if_snd, m);
  827         xl_start(ifp);
  828 }
  829 #endif
  830 
  831 static void
  832 xl_setcfg(struct xl_softc *sc)
  833 {
  834         u_int32_t               icfg;
  835 
  836         /*XL_LOCK_ASSERT(sc);*/
  837 
  838         XL_SEL_WIN(3);
  839         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
  840         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  841         if (sc->xl_media & XL_MEDIAOPT_MII ||
  842                 sc->xl_media & XL_MEDIAOPT_BT4)
  843                 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
  844         if (sc->xl_media & XL_MEDIAOPT_BTX)
  845                 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
  846 
  847         CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
  848         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
  849 }
  850 
  851 static void
  852 xl_setmode(struct xl_softc *sc, int media)
  853 {
  854         u_int32_t               icfg;
  855         u_int16_t               mediastat;
  856         char                    *pmsg = "", *dmsg = "";
  857 
  858         XL_LOCK_ASSERT(sc);
  859 
  860         XL_SEL_WIN(4);
  861         mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
  862         XL_SEL_WIN(3);
  863         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
  864 
  865         if (sc->xl_media & XL_MEDIAOPT_BT) {
  866                 if (IFM_SUBTYPE(media) == IFM_10_T) {
  867                         pmsg = "10baseT transceiver";
  868                         sc->xl_xcvr = XL_XCVR_10BT;
  869                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  870                         icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
  871                         mediastat |= XL_MEDIASTAT_LINKBEAT |
  872                             XL_MEDIASTAT_JABGUARD;
  873                         mediastat &= ~XL_MEDIASTAT_SQEENB;
  874                 }
  875         }
  876 
  877         if (sc->xl_media & XL_MEDIAOPT_BFX) {
  878                 if (IFM_SUBTYPE(media) == IFM_100_FX) {
  879                         pmsg = "100baseFX port";
  880                         sc->xl_xcvr = XL_XCVR_100BFX;
  881                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  882                         icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
  883                         mediastat |= XL_MEDIASTAT_LINKBEAT;
  884                         mediastat &= ~XL_MEDIASTAT_SQEENB;
  885                 }
  886         }
  887 
  888         if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
  889                 if (IFM_SUBTYPE(media) == IFM_10_5) {
  890                         pmsg = "AUI port";
  891                         sc->xl_xcvr = XL_XCVR_AUI;
  892                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  893                         icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
  894                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  895                             XL_MEDIASTAT_JABGUARD);
  896                         mediastat |= ~XL_MEDIASTAT_SQEENB;
  897                 }
  898                 if (IFM_SUBTYPE(media) == IFM_10_FL) {
  899                         pmsg = "10baseFL transceiver";
  900                         sc->xl_xcvr = XL_XCVR_AUI;
  901                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  902                         icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
  903                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  904                             XL_MEDIASTAT_JABGUARD);
  905                         mediastat |= ~XL_MEDIASTAT_SQEENB;
  906                 }
  907         }
  908 
  909         if (sc->xl_media & XL_MEDIAOPT_BNC) {
  910                 if (IFM_SUBTYPE(media) == IFM_10_2) {
  911                         pmsg = "AUI port";
  912                         sc->xl_xcvr = XL_XCVR_COAX;
  913                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  914                         icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
  915                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  916                             XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB);
  917                 }
  918         }
  919 
  920         if ((media & IFM_GMASK) == IFM_FDX ||
  921                         IFM_SUBTYPE(media) == IFM_100_FX) {
  922                 dmsg = "full";
  923                 XL_SEL_WIN(3);
  924                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
  925         } else {
  926                 dmsg = "half";
  927                 XL_SEL_WIN(3);
  928                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
  929                         (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
  930         }
  931 
  932         if (IFM_SUBTYPE(media) == IFM_10_2)
  933                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
  934         else
  935                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
  936 
  937         CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
  938         XL_SEL_WIN(4);
  939         CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
  940 
  941         DELAY(800);
  942         XL_SEL_WIN(7);
  943 
  944         device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg);
  945 }
  946 
  947 static void
  948 xl_reset(struct xl_softc *sc)
  949 {
  950         register int            i;
  951 
  952         XL_LOCK_ASSERT(sc);
  953 
  954         XL_SEL_WIN(0);
  955         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
  956             ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
  957              XL_RESETOPT_DISADVFD:0));
  958 
  959         /*
  960          * If we're using memory mapped register mode, pause briefly
  961          * after issuing the reset command before trying to access any
  962          * other registers. With my 3c575C cardbus card, failing to do
  963          * this results in the system locking up while trying to poll
  964          * the command busy bit in the status register.
  965          */
  966         if (sc->xl_flags & XL_FLAG_USE_MMIO)
  967                 DELAY(100000);
  968 
  969         for (i = 0; i < XL_TIMEOUT; i++) {
  970                 DELAY(10);
  971                 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
  972                         break;
  973         }
  974 
  975         if (i == XL_TIMEOUT)
  976                 device_printf(sc->xl_dev, "reset didn't complete\n");
  977 
  978         /* Reset TX and RX. */
  979         /* Note: the RX reset takes an absurd amount of time
  980          * on newer versions of the Tornado chips such as those
  981          * on the 3c905CX and newer 3c908C cards. We wait an
  982          * extra amount of time so that xl_wait() doesn't complain
  983          * and annoy the users.
  984          */
  985         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
  986         DELAY(100000);
  987         xl_wait(sc);
  988         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
  989         xl_wait(sc);
  990 
  991         if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
  992             sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
  993                 XL_SEL_WIN(2);
  994                 CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS,
  995                     CSR_READ_2(sc, XL_W2_RESET_OPTIONS) |
  996                     ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ?
  997                     XL_RESETOPT_INVERT_LED : 0) |
  998                     ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ?
  999                     XL_RESETOPT_INVERT_MII : 0));
 1000         }
 1001 
 1002         /* Wait a little while for the chip to get its brains in order. */
 1003         DELAY(100000);
 1004 }
 1005 
 1006 /*
 1007  * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
 1008  * IDs against our list and return a device name if we find a match.
 1009  */
 1010 static int
 1011 xl_probe(device_t dev)
 1012 {
 1013         const struct xl_type    *t;
 1014 
 1015         t = xl_devs;
 1016 
 1017         while (t->xl_name != NULL) {
 1018                 if ((pci_get_vendor(dev) == t->xl_vid) &&
 1019                     (pci_get_device(dev) == t->xl_did)) {
 1020                         device_set_desc(dev, t->xl_name);
 1021                         return (BUS_PROBE_DEFAULT);
 1022                 }
 1023                 t++;
 1024         }
 1025 
 1026         return (ENXIO);
 1027 }
 1028 
 1029 /*
 1030  * This routine is a kludge to work around possible hardware faults
 1031  * or manufacturing defects that can cause the media options register
 1032  * (or reset options register, as it's called for the first generation
 1033  * 3c90x adapters) to return an incorrect result. I have encountered
 1034  * one Dell Latitude laptop docking station with an integrated 3c905-TX
 1035  * which doesn't have any of the 'mediaopt' bits set. This screws up
 1036  * the attach routine pretty badly because it doesn't know what media
 1037  * to look for. If we find ourselves in this predicament, this routine
 1038  * will try to guess the media options values and warn the user of a
 1039  * possible manufacturing defect with his adapter/system/whatever.
 1040  */
 1041 static void
 1042 xl_mediacheck(struct xl_softc *sc)
 1043 {
 1044 
 1045         /*
 1046          * If some of the media options bits are set, assume they are
 1047          * correct. If not, try to figure it out down below.
 1048          * XXX I should check for 10baseFL, but I don't have an adapter
 1049          * to test with.
 1050          */
 1051         if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
 1052                 /*
 1053                  * Check the XCVR value. If it's not in the normal range
 1054                  * of values, we need to fake it up here.
 1055                  */
 1056                 if (sc->xl_xcvr <= XL_XCVR_AUTO)
 1057                         return;
 1058                 else {
 1059                         device_printf(sc->xl_dev,
 1060                             "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr);
 1061                         device_printf(sc->xl_dev,
 1062                             "choosing new default based on card type\n");
 1063                 }
 1064         } else {
 1065                 if (sc->xl_type == XL_TYPE_905B &&
 1066                     sc->xl_media & XL_MEDIAOPT_10FL)
 1067                         return;
 1068                 device_printf(sc->xl_dev,
 1069 "WARNING: no media options bits set in the media options register!!\n");
 1070                 device_printf(sc->xl_dev,
 1071 "this could be a manufacturing defect in your adapter or system\n");
 1072                 device_printf(sc->xl_dev,
 1073 "attempting to guess media type; you should probably consult your vendor\n");
 1074         }
 1075 
 1076         xl_choose_xcvr(sc, 1);
 1077 }
 1078 
 1079 static void
 1080 xl_choose_xcvr(struct xl_softc *sc, int verbose)
 1081 {
 1082         u_int16_t               devid;
 1083 
 1084         /*
 1085          * Read the device ID from the EEPROM.
 1086          * This is what's loaded into the PCI device ID register, so it has
  1087          * to be correct; otherwise we wouldn't have gotten this far.
 1088          */
 1089         xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
 1090 
 1091         switch (devid) {
 1092         case TC_DEVICEID_BOOMERANG_10BT:        /* 3c900-TPO */
 1093         case TC_DEVICEID_KRAKATOA_10BT:         /* 3c900B-TPO */
 1094                 sc->xl_media = XL_MEDIAOPT_BT;
 1095                 sc->xl_xcvr = XL_XCVR_10BT;
 1096                 if (verbose)
 1097                         device_printf(sc->xl_dev,
 1098                             "guessing 10BaseT transceiver\n");
 1099                 break;
 1100         case TC_DEVICEID_BOOMERANG_10BT_COMBO:  /* 3c900-COMBO */
 1101         case TC_DEVICEID_KRAKATOA_10BT_COMBO:   /* 3c900B-COMBO */
 1102                 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
 1103                 sc->xl_xcvr = XL_XCVR_10BT;
 1104                 if (verbose)
 1105                         device_printf(sc->xl_dev,
 1106                             "guessing COMBO (AUI/BNC/TP)\n");
 1107                 break;
 1108         case TC_DEVICEID_KRAKATOA_10BT_TPC:     /* 3c900B-TPC */
 1109                 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
 1110                 sc->xl_xcvr = XL_XCVR_10BT;
 1111                 if (verbose)
 1112                         device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n");
 1113                 break;
 1114         case TC_DEVICEID_CYCLONE_10FL:          /* 3c900B-FL */
 1115                 sc->xl_media = XL_MEDIAOPT_10FL;
 1116                 sc->xl_xcvr = XL_XCVR_AUI;
 1117                 if (verbose)
 1118                         device_printf(sc->xl_dev, "guessing 10baseFL\n");
 1119                 break;
 1120         case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
 1121         case TC_DEVICEID_HURRICANE_555:         /* 3c555 */
 1122         case TC_DEVICEID_HURRICANE_556:         /* 3c556 */
 1123         case TC_DEVICEID_HURRICANE_556B:        /* 3c556B */
 1124         case TC_DEVICEID_HURRICANE_575A:        /* 3c575TX */
 1125         case TC_DEVICEID_HURRICANE_575B:        /* 3c575B */
 1126         case TC_DEVICEID_HURRICANE_575C:        /* 3c575C */
 1127         case TC_DEVICEID_HURRICANE_656:         /* 3c656 */
 1128         case TC_DEVICEID_HURRICANE_656B:        /* 3c656B */
 1129         case TC_DEVICEID_TORNADO_656C:          /* 3c656C */
 1130         case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
 1131         case TC_DEVICEID_TORNADO_10_100BT_920B_WNM:     /* 3c920B-EMB-WNM */
 1132                 sc->xl_media = XL_MEDIAOPT_MII;
 1133                 sc->xl_xcvr = XL_XCVR_MII;
 1134                 if (verbose)
 1135                         device_printf(sc->xl_dev, "guessing MII\n");
 1136                 break;
 1137         case TC_DEVICEID_BOOMERANG_100BT4:      /* 3c905-T4 */
 1138         case TC_DEVICEID_CYCLONE_10_100BT4:     /* 3c905B-T4 */
 1139                 sc->xl_media = XL_MEDIAOPT_BT4;
 1140                 sc->xl_xcvr = XL_XCVR_MII;
 1141                 if (verbose)
 1142                         device_printf(sc->xl_dev, "guessing 100baseT4/MII\n");
 1143                 break;
 1144         case TC_DEVICEID_HURRICANE_10_100BT:    /* 3c905B-TX */
 1145         case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */
 1146         case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */
 1147         case TC_DEVICEID_HURRICANE_SOHO100TX:   /* 3cSOHO100-TX */
 1148         case TC_DEVICEID_TORNADO_10_100BT:      /* 3c905C-TX */
 1149         case TC_DEVICEID_TORNADO_HOMECONNECT:   /* 3c450-TX */
 1150                 sc->xl_media = XL_MEDIAOPT_BTX;
 1151                 sc->xl_xcvr = XL_XCVR_AUTO;
 1152                 if (verbose)
 1153                         device_printf(sc->xl_dev, "guessing 10/100 internal\n");
 1154                 break;
 1155         case TC_DEVICEID_CYCLONE_10_100_COMBO:  /* 3c905B-COMBO */
 1156                 sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
 1157                 sc->xl_xcvr = XL_XCVR_AUTO;
 1158                 if (verbose)
 1159                         device_printf(sc->xl_dev,
 1160                             "guessing 10/100 plus BNC/AUI\n");
 1161                 break;
 1162         default:
 1163                 device_printf(sc->xl_dev,
 1164                     "unknown device ID: %x -- defaulting to 10baseT\n", devid);
 1165                 sc->xl_media = XL_MEDIAOPT_BT;
 1166                 break;
 1167         }
 1168 }
 1169 
 1170 /*
 1171  * Attach the interface. Allocate softc structures, do ifmedia
 1172  * setup and ethernet/BPF attach.
 1173  */
 1174 static int
 1175 xl_attach(device_t dev)
 1176 {
 1177         u_char                  eaddr[ETHER_ADDR_LEN];
 1178         u_int16_t               xcvr[2];
 1179         struct xl_softc         *sc;
 1180         struct ifnet            *ifp;
 1181         int                     media;
 1182         int                     unit, error = 0, rid, res;
 1183         uint16_t                did;
 1184 
 1185         sc = device_get_softc(dev);
 1186         sc->xl_dev = dev;
 1187 
 1188         unit = device_get_unit(dev);
 1189 
 1190         mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1191             MTX_DEF);
 1192         ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
 1193 
 1194         did = pci_get_device(dev);
 1195 
 1196         sc->xl_flags = 0;
 1197         if (did == TC_DEVICEID_HURRICANE_555)
 1198                 sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
 1199         if (did == TC_DEVICEID_HURRICANE_556 ||
 1200             did == TC_DEVICEID_HURRICANE_556B)
 1201                 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
 1202                     XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
 1203                     XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
 1204         if (did == TC_DEVICEID_HURRICANE_555 ||
 1205             did == TC_DEVICEID_HURRICANE_556)
 1206                 sc->xl_flags |= XL_FLAG_8BITROM;
 1207         if (did == TC_DEVICEID_HURRICANE_556B)
 1208                 sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
 1209 
 1210         if (did == TC_DEVICEID_HURRICANE_575B ||
 1211             did == TC_DEVICEID_HURRICANE_575C ||
 1212             did == TC_DEVICEID_HURRICANE_656B ||
 1213             did == TC_DEVICEID_TORNADO_656C)
 1214                 sc->xl_flags |= XL_FLAG_FUNCREG;
 1215         if (did == TC_DEVICEID_HURRICANE_575A ||
 1216             did == TC_DEVICEID_HURRICANE_575B ||
 1217             did == TC_DEVICEID_HURRICANE_575C ||
 1218             did == TC_DEVICEID_HURRICANE_656B ||
 1219             did == TC_DEVICEID_TORNADO_656C)
 1220                 sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 |
 1221                   XL_FLAG_8BITROM;
 1222         if (did == TC_DEVICEID_HURRICANE_656)
 1223                 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
 1224         if (did == TC_DEVICEID_HURRICANE_575B)
 1225                 sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
 1226         if (did == TC_DEVICEID_HURRICANE_575C)
 1227                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
 1228         if (did == TC_DEVICEID_TORNADO_656C)
 1229                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
 1230         if (did == TC_DEVICEID_HURRICANE_656 ||
 1231             did == TC_DEVICEID_HURRICANE_656B)
 1232                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
 1233                     XL_FLAG_INVERT_LED_PWR;
 1234         if (did == TC_DEVICEID_TORNADO_10_100BT_920B ||
 1235             did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM)
 1236                 sc->xl_flags |= XL_FLAG_PHYOK;
 1237 
 1238         switch (did) {
 1239         case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
 1240         case TC_DEVICEID_HURRICANE_575A:
 1241         case TC_DEVICEID_HURRICANE_575B:
 1242         case TC_DEVICEID_HURRICANE_575C:
 1243                 sc->xl_flags |= XL_FLAG_NO_MMIO;
 1244                 break;
 1245         default:
 1246                 break;
 1247         }
 1248 
 1249         /*
 1250          * Map control/status registers.
 1251          */
 1252         pci_enable_busmaster(dev);
 1253 
 1254         if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
 1255                 rid = XL_PCI_LOMEM;
 1256                 res = SYS_RES_MEMORY;
 1257 
 1258                 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
 1259         }
 1260 
 1261         if (sc->xl_res != NULL) {
 1262                 sc->xl_flags |= XL_FLAG_USE_MMIO;
 1263                 if (bootverbose)
 1264                         device_printf(dev, "using memory mapped I/O\n");
 1265         } else {
 1266                 rid = XL_PCI_LOIO;
 1267                 res = SYS_RES_IOPORT;
 1268                 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
 1269                 if (sc->xl_res == NULL) {
 1270                         device_printf(dev, "couldn't map ports/memory\n");
 1271                         error = ENXIO;
 1272                         goto fail;
 1273                 }
 1274                 if (bootverbose)
 1275                         device_printf(dev, "using port I/O\n");
 1276         }
 1277 
 1278         sc->xl_btag = rman_get_bustag(sc->xl_res);
 1279         sc->xl_bhandle = rman_get_bushandle(sc->xl_res);
 1280 
 1281         if (sc->xl_flags & XL_FLAG_FUNCREG) {
 1282                 rid = XL_PCI_FUNCMEM;
 1283                 sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 1284                     RF_ACTIVE);
 1285 
 1286                 if (sc->xl_fres == NULL) {
 1287                         device_printf(dev, "couldn't map funcreg memory\n");
 1288                         error = ENXIO;
 1289                         goto fail;
 1290                 }
 1291 
 1292                 sc->xl_ftag = rman_get_bustag(sc->xl_fres);
 1293                 sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
 1294         }
 1295 
 1296         /* Allocate interrupt */
 1297         rid = 0;
 1298         sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 1299             RF_SHAREABLE | RF_ACTIVE);
 1300         if (sc->xl_irq == NULL) {
 1301                 device_printf(dev, "couldn't map interrupt\n");
 1302                 error = ENXIO;
 1303                 goto fail;
 1304         }
 1305 
 1306         /* Initialize interface name. */
 1307         ifp = sc->xl_ifp = if_alloc(IFT_ETHER);
 1308         if (ifp == NULL) {
 1309                 device_printf(dev, "can not if_alloc()\n");
 1310                 error = ENOSPC;
 1311                 goto fail;
 1312         }
 1313         ifp->if_softc = sc;
 1314         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1315 
 1316         /* Reset the adapter. */
 1317         XL_LOCK(sc);
 1318         xl_reset(sc);
 1319         XL_UNLOCK(sc);
 1320 
 1321         /*
 1322          * Get station address from the EEPROM.
 1323          */
 1324         if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
 1325                 device_printf(dev, "failed to read station address\n");
 1326                 error = ENXIO;
 1327                 goto fail;
 1328         }
 1329 
 1330         sc->xl_unit = unit;
 1331         callout_init_mtx(&sc->xl_stat_callout, &sc->xl_mtx, 0);
 1332         TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
 1333 
 1334         /*
 1335          * Now allocate a tag for the DMA descriptor lists and a chunk
 1336          * of DMA-able memory based on the tag.  Also obtain the DMA
 1337          * addresses of the RX and TX ring, which we'll need later.
 1338          * All of our lists are allocated as a contiguous block
 1339          * of memory.
 1340          */
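        /*
         * Each ring goes through the usual busdma three-step sequence:
         * create a tag describing the alignment and size constraints,
         * allocate DMA-safe memory against that tag, and load the map
         * so that the xl_dma_map_addr() callback can record the ring's
         * bus address in xl_rx_dmaaddr / xl_tx_dmaaddr.
         */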
 1341         error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 1342             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1343             XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
 1344             &sc->xl_ldata.xl_rx_tag);
 1345         if (error) {
 1346                 device_printf(dev, "failed to allocate rx dma tag\n");
 1347                 goto fail;
 1348         }
 1349 
 1350         error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
 1351             (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 1352             &sc->xl_ldata.xl_rx_dmamap);
 1353         if (error) {
 1354                 device_printf(dev, "no memory for rx list buffers!\n");
 1355                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1356                 sc->xl_ldata.xl_rx_tag = NULL;
 1357                 goto fail;
 1358         }
 1359 
 1360         error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
 1361             sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
 1362             XL_RX_LIST_SZ, xl_dma_map_addr,
 1363             &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
 1364         if (error) {
 1365                 device_printf(dev, "cannot get dma address of the rx ring!\n");
 1366                 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
 1367                     sc->xl_ldata.xl_rx_dmamap);
 1368                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1369                 sc->xl_ldata.xl_rx_tag = NULL;
 1370                 goto fail;
 1371         }
 1372 
 1373         error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 1374             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1375             XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
 1376             &sc->xl_ldata.xl_tx_tag);
 1377         if (error) {
 1378                 device_printf(dev, "failed to allocate tx dma tag\n");
 1379                 goto fail;
 1380         }
 1381 
 1382         error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
 1383             (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 1384             &sc->xl_ldata.xl_tx_dmamap);
 1385         if (error) {
 1386                 device_printf(dev, "no memory for list buffers!\n");
 1387                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1388                 sc->xl_ldata.xl_tx_tag = NULL;
 1389                 goto fail;
 1390         }
 1391 
 1392         error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
 1393             sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
 1394             XL_TX_LIST_SZ, xl_dma_map_addr,
 1395             &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
 1396         if (error) {
 1397                 device_printf(dev, "cannot get dma address of the tx ring!\n");
 1398                 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
 1399                     sc->xl_ldata.xl_tx_dmamap);
 1400                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1401                 sc->xl_ldata.xl_tx_tag = NULL;
 1402                 goto fail;
 1403         }
 1404 
 1405         /*
 1406          * Allocate a DMA tag for the mapping of mbufs.
 1407          */
 1408         error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 1409             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1410             MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
 1411             NULL, &sc->xl_mtag);
 1412         if (error) {
 1413                 device_printf(dev, "failed to allocate mbuf dma tag\n");
 1414                 goto fail;
 1415         }
 1416 
 1417         /* We need a spare DMA map for the RX ring. */
 1418         error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
 1419         if (error)
 1420                 goto fail;
 1421 
 1422         /*
 1423          * Figure out the card type. 3c905B adapters have the
 1424          * 'supportsNoTxLength' bit set in the capabilities
 1425          * word in the EEPROM.
 1426          * Note: my 3c575C cardbus card lies. It returns a value
 1427          * of 0x1578 for its capabilities word, which is somewhat
 1428          * nonsensical. Another way to distinguish a 3c90x chip
 1429          * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
  1430          * bit. This will only be set for 3c90x boomerang chips.
 1431          */
 1432         xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
 1433         if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
 1434             !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
 1435                 sc->xl_type = XL_TYPE_905B;
 1436         else
 1437                 sc->xl_type = XL_TYPE_90X;
 1438 
 1439         /* Set the TX start threshold for best performance. */
 1440         sc->xl_tx_thresh = XL_MIN_FRAMELEN;
 1441 
 1442         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1443         ifp->if_ioctl = xl_ioctl;
 1444         ifp->if_capabilities = IFCAP_VLAN_MTU;
 1445         if (sc->xl_type == XL_TYPE_905B) {
 1446                 ifp->if_hwassist = XL905B_CSUM_FEATURES;
 1447 #ifdef XL905B_TXCSUM_BROKEN
 1448                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1449 #else
 1450                 ifp->if_capabilities |= IFCAP_HWCSUM;
 1451 #endif
 1452         }
 1453         ifp->if_capenable = ifp->if_capabilities;
 1454 #ifdef DEVICE_POLLING
 1455         ifp->if_capabilities |= IFCAP_POLLING;
 1456 #endif
 1457         ifp->if_start = xl_start;
 1458         ifp->if_init = xl_init;
 1459         IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
 1460         ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1;
 1461         IFQ_SET_READY(&ifp->if_snd);
 1462 
 1463         /*
 1464          * Now we have to see what sort of media we have.
  1465          * This includes probing for an MII interface and a
 1466          * possible PHY.
 1467          */
 1468         XL_SEL_WIN(3);
 1469         sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
 1470         if (bootverbose)
 1471                 device_printf(dev, "media options word: %x\n", sc->xl_media);
 1472 
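        /*
         * Read the two internal-config words from the EEPROM and pull
         * out the connector (transceiver) field; this is the default
         * transceiver selection configured for the card.
         */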
 1473         xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
 1474         sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
 1475         sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
 1476         sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
 1477 
 1478         xl_mediacheck(sc);
 1479 
 1480         if (sc->xl_media & XL_MEDIAOPT_MII ||
 1481             sc->xl_media & XL_MEDIAOPT_BTX ||
 1482             sc->xl_media & XL_MEDIAOPT_BT4) {
 1483                 if (bootverbose)
 1484                         device_printf(dev, "found MII/AUTO\n");
 1485                 xl_setcfg(sc);
 1486                 if (mii_phy_probe(dev, &sc->xl_miibus,
 1487                     xl_ifmedia_upd, xl_ifmedia_sts)) {
 1488                         device_printf(dev, "no PHY found!\n");
 1489                         error = ENXIO;
 1490                         goto fail;
 1491                 }
 1492                 goto done;
 1493         }
 1494 
 1495         /*
 1496          * Sanity check. If the user has selected "auto" and this isn't
 1497          * a 10/100 card of some kind, we need to force the transceiver
 1498          * type to something sane.
 1499          */
 1500         if (sc->xl_xcvr == XL_XCVR_AUTO)
 1501                 xl_choose_xcvr(sc, bootverbose);
 1502 
 1503         /*
 1504          * Do ifmedia setup.
 1505          */
 1506         if (sc->xl_media & XL_MEDIAOPT_BT) {
 1507                 if (bootverbose)
 1508                         device_printf(dev, "found 10baseT\n");
 1509                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
 1510                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
 1511                 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
 1512                         ifmedia_add(&sc->ifmedia,
 1513                             IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
 1514         }
 1515 
 1516         if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
 1517                 /*
 1518                  * Check for a 10baseFL board in disguise.
 1519                  */
 1520                 if (sc->xl_type == XL_TYPE_905B &&
 1521                     sc->xl_media == XL_MEDIAOPT_10FL) {
 1522                         if (bootverbose)
 1523                                 device_printf(dev, "found 10baseFL\n");
 1524                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
 1525                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
 1526                             0, NULL);
 1527                         if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
 1528                                 ifmedia_add(&sc->ifmedia,
 1529                                     IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
 1530                 } else {
 1531                         if (bootverbose)
 1532                                 device_printf(dev, "found AUI\n");
 1533                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
 1534                 }
 1535         }
 1536 
 1537         if (sc->xl_media & XL_MEDIAOPT_BNC) {
 1538                 if (bootverbose)
 1539                         device_printf(dev, "found BNC\n");
 1540                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
 1541         }
 1542 
 1543         if (sc->xl_media & XL_MEDIAOPT_BFX) {
 1544                 if (bootverbose)
 1545                         device_printf(dev, "found 100baseFX\n");
 1546                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
 1547         }
 1548 
 1549         media = IFM_ETHER|IFM_100_TX|IFM_FDX;
 1550         xl_choose_media(sc, &media);
 1551 
 1552         if (sc->xl_miibus == NULL)
 1553                 ifmedia_set(&sc->ifmedia, media);
 1554 
 1555 done:
 1556         if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
 1557                 XL_SEL_WIN(0);
 1558                 CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
 1559         }
 1560 
 1561         /*
 1562          * Call MI attach routine.
 1563          */
 1564         ether_ifattach(ifp, eaddr);
 1565 
 1566         error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE,
 1567             xl_intr, sc, &sc->xl_intrhand);
 1568         if (error) {
 1569                 device_printf(dev, "couldn't set up irq\n");
 1570                 ether_ifdetach(ifp);
 1571                 goto fail;
 1572         }
 1573 
 1574 fail:
 1575         if (error)
 1576                 xl_detach(dev);
 1577 
 1578         return (error);
 1579 }
 1580 
 1581 /*
 1582  * Choose a default media.
 1583  * XXX This is a leaf function only called by xl_attach() and
  1584  *     acquires/releases the non-recursive driver mutex to
 1585  *     satisfy lock assertions.
 1586  */
 1587 static void
 1588 xl_choose_media(struct xl_softc *sc, int *media)
 1589 {
 1590 
 1591         XL_LOCK(sc);
 1592 
 1593         switch (sc->xl_xcvr) {
 1594         case XL_XCVR_10BT:
 1595                 *media = IFM_ETHER|IFM_10_T;
 1596                 xl_setmode(sc, *media);
 1597                 break;
 1598         case XL_XCVR_AUI:
 1599                 if (sc->xl_type == XL_TYPE_905B &&
 1600                     sc->xl_media == XL_MEDIAOPT_10FL) {
 1601                         *media = IFM_ETHER|IFM_10_FL;
 1602                         xl_setmode(sc, *media);
 1603                 } else {
 1604                         *media = IFM_ETHER|IFM_10_5;
 1605                         xl_setmode(sc, *media);
 1606                 }
 1607                 break;
 1608         case XL_XCVR_COAX:
 1609                 *media = IFM_ETHER|IFM_10_2;
 1610                 xl_setmode(sc, *media);
 1611                 break;
 1612         case XL_XCVR_AUTO:
 1613         case XL_XCVR_100BTX:
 1614         case XL_XCVR_MII:
 1615                 /* Chosen by miibus */
 1616                 break;
 1617         case XL_XCVR_100BFX:
 1618                 *media = IFM_ETHER|IFM_100_FX;
 1619                 break;
 1620         default:
 1621                 device_printf(sc->xl_dev, "unknown XCVR type: %d\n",
 1622                     sc->xl_xcvr);
 1623                 /*
 1624                  * This will probably be wrong, but it prevents
 1625                  * the ifmedia code from panicking.
 1626                  */
 1627                 *media = IFM_ETHER|IFM_10_T;
 1628                 break;
 1629         }
 1630 
 1631         XL_UNLOCK(sc);
 1632 }
 1633 
 1634 /*
 1635  * Shutdown hardware and free up resources. This can be called any
 1636  * time after the mutex has been initialized. It is called in both
 1637  * the error case in attach and the normal detach case so it needs
 1638  * to be careful about only freeing resources that have actually been
 1639  * allocated.
 1640  */
 1641 static int
 1642 xl_detach(device_t dev)
 1643 {
 1644         struct xl_softc         *sc;
 1645         struct ifnet            *ifp;
 1646         int                     rid, res;
 1647 
 1648         sc = device_get_softc(dev);
 1649         ifp = sc->xl_ifp;
 1650 
 1651         KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
 1652 
 1653 #ifdef DEVICE_POLLING
 1654         if (ifp && ifp->if_capenable & IFCAP_POLLING)
 1655                 ether_poll_deregister(ifp);
 1656 #endif
 1657 
 1658         if (sc->xl_flags & XL_FLAG_USE_MMIO) {
 1659                 rid = XL_PCI_LOMEM;
 1660                 res = SYS_RES_MEMORY;
 1661         } else {
 1662                 rid = XL_PCI_LOIO;
 1663                 res = SYS_RES_IOPORT;
 1664         }
 1665 
 1666         /* These should only be active if attach succeeded */
 1667         if (device_is_attached(dev)) {
 1668                 XL_LOCK(sc);
 1669                 xl_reset(sc);
 1670                 xl_stop(sc);
 1671                 XL_UNLOCK(sc);
 1672                 taskqueue_drain(taskqueue_swi, &sc->xl_task);
 1673                 callout_drain(&sc->xl_stat_callout);
 1674                 ether_ifdetach(ifp);
 1675         }
 1676         if (sc->xl_miibus)
 1677                 device_delete_child(dev, sc->xl_miibus);
 1678         bus_generic_detach(dev);
 1679         ifmedia_removeall(&sc->ifmedia);
 1680 
 1681         if (sc->xl_intrhand)
 1682                 bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
 1683         if (sc->xl_irq)
 1684                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
 1685         if (sc->xl_fres != NULL)
 1686                 bus_release_resource(dev, SYS_RES_MEMORY,
 1687                     XL_PCI_FUNCMEM, sc->xl_fres);
 1688         if (sc->xl_res)
 1689                 bus_release_resource(dev, res, rid, sc->xl_res);
 1690 
 1691         if (ifp)
 1692                 if_free(ifp);
 1693 
 1694         if (sc->xl_mtag) {
 1695                 bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
 1696                 bus_dma_tag_destroy(sc->xl_mtag);
 1697         }
 1698         if (sc->xl_ldata.xl_rx_tag) {
 1699                 bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
 1700                     sc->xl_ldata.xl_rx_dmamap);
 1701                 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
 1702                     sc->xl_ldata.xl_rx_dmamap);
 1703                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1704         }
 1705         if (sc->xl_ldata.xl_tx_tag) {
 1706                 bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
 1707                     sc->xl_ldata.xl_tx_dmamap);
 1708                 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
 1709                     sc->xl_ldata.xl_tx_dmamap);
 1710                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1711         }
 1712 
 1713         mtx_destroy(&sc->xl_mtx);
 1714 
 1715         return (0);
 1716 }
 1717 
 1718 /*
 1719  * Initialize the transmit descriptors.
 1720  */
 1721 static int
 1722 xl_list_tx_init(struct xl_softc *sc)
 1723 {
 1724         struct xl_chain_data    *cd;
 1725         struct xl_list_data     *ld;
 1726         int                     error, i;
 1727 
 1728         XL_LOCK_ASSERT(sc);
 1729 
 1730         cd = &sc->xl_cdata;
 1731         ld = &sc->xl_ldata;
 1732         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 1733                 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
 1734                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1735                     &cd->xl_tx_chain[i].xl_map);
 1736                 if (error)
 1737                         return (error);
 1738                 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
 1739                     i * sizeof(struct xl_list);
 1740                 if (i == (XL_TX_LIST_CNT - 1))
 1741                         cd->xl_tx_chain[i].xl_next = NULL;
 1742                 else
 1743                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
 1744         }
 1745 
 1746         cd->xl_tx_free = &cd->xl_tx_chain[0];
 1747         cd->xl_tx_tail = cd->xl_tx_head = NULL;
 1748 
 1749         bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
 1750         return (0);
 1751 }
 1752 
 1753 /*
  1754  * Initialize the transmit descriptors (90xB/C: closed descriptor ring).
 1755  */
 1756 static int
 1757 xl_list_tx_init_90xB(struct xl_softc *sc)
 1758 {
 1759         struct xl_chain_data    *cd;
 1760         struct xl_list_data     *ld;
 1761         int                     error, i;
 1762 
 1763         XL_LOCK_ASSERT(sc);
 1764 
 1765         cd = &sc->xl_cdata;
 1766         ld = &sc->xl_ldata;
 1767         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 1768                 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
 1769                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1770                     &cd->xl_tx_chain[i].xl_map);
 1771                 if (error)
 1772                         return (error);
 1773                 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
 1774                     i * sizeof(struct xl_list);
 1775                 if (i == (XL_TX_LIST_CNT - 1))
 1776                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
 1777                 else
 1778                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
 1779                 if (i == 0)
 1780                         cd->xl_tx_chain[i].xl_prev =
 1781                             &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
 1782                 else
 1783                         cd->xl_tx_chain[i].xl_prev =
 1784                             &cd->xl_tx_chain[i - 1];
 1785         }
 1786 
 1787         bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
 1788         ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
 1789 
 1790         cd->xl_tx_prod = 1;
 1791         cd->xl_tx_cons = 1;
 1792         cd->xl_tx_cnt = 0;
 1793 
 1794         bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
 1795         return (0);
 1796 }
 1797 
 1798 /*
 1799  * Initialize the RX descriptors and allocate mbufs for them. Note that
 1800  * we arrange the descriptors in a closed ring, so that the last descriptor
 1801  * points back to the first.
 1802  */
 1803 static int
 1804 xl_list_rx_init(struct xl_softc *sc)
 1805 {
 1806         struct xl_chain_data    *cd;
 1807         struct xl_list_data     *ld;
 1808         int                     error, i, next;
 1809         u_int32_t               nextptr;
 1810 
 1811         XL_LOCK_ASSERT(sc);
 1812 
 1813         cd = &sc->xl_cdata;
 1814         ld = &sc->xl_ldata;
 1815 
 1816         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 1817                 cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
 1818                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1819                     &cd->xl_rx_chain[i].xl_map);
 1820                 if (error)
 1821                         return (error);
 1822                 error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
 1823                 if (error)
 1824                         return (error);
 1825                 if (i == (XL_RX_LIST_CNT - 1))
 1826                         next = 0;
 1827                 else
 1828                         next = i + 1;
 1829                 nextptr = ld->xl_rx_dmaaddr +
 1830                     next * sizeof(struct xl_list_onefrag);
 1831                 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
 1832                 ld->xl_rx_list[i].xl_next = htole32(nextptr);
 1833         }
 1834 
 1835         bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1836         cd->xl_rx_head = &cd->xl_rx_chain[0];
 1837 
 1838         return (0);
 1839 }
 1840 
 1841 /*
 1842  * Initialize an RX descriptor and attach an MBUF cluster.
 1843  * If we fail to do so, we need to leave the old mbuf and
 1844  * the old DMA map untouched so that it can be reused.
 1845  */
 1846 static int
 1847 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
 1848 {
 1849         struct mbuf             *m_new = NULL;
 1850         bus_dmamap_t            map;
 1851         bus_dma_segment_t       segs[1];
 1852         int                     error, nseg;
 1853 
 1854         XL_LOCK_ASSERT(sc);
 1855 
 1856         m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 1857         if (m_new == NULL)
 1858                 return (ENOBUFS);
 1859 
 1860         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
 1861 
 1862         /* Force longword alignment for packet payload. */
 1863         m_adj(m_new, ETHER_ALIGN);
 1864 
 1865         error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, sc->xl_tmpmap, m_new,
 1866             segs, &nseg, BUS_DMA_NOWAIT);
 1867         if (error) {
 1868                 m_freem(m_new);
 1869                 device_printf(sc->xl_dev, "can't map mbuf (error %d)\n",
 1870                     error);
 1871                 return (error);
 1872         }
 1873         KASSERT(nseg == 1,
 1874             ("%s: too many DMA segments (%d)", __func__, nseg));
 1875 
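        /*
         * The new mbuf was loaded into the spare map (xl_tmpmap).  Now
         * that the load has succeeded, unload the descriptor's old map
         * and swap the two, so the descriptor keeps the freshly loaded
         * map and the old map becomes the new spare.
         */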
 1876         bus_dmamap_unload(sc->xl_mtag, c->xl_map);
 1877         map = c->xl_map;
 1878         c->xl_map = sc->xl_tmpmap;
 1879         sc->xl_tmpmap = map;
 1880         c->xl_mbuf = m_new;
 1881         c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
 1882         c->xl_ptr->xl_status = 0;
 1883         c->xl_ptr->xl_frag.xl_addr = htole32(segs->ds_addr);
 1884         bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
 1885         return (0);
 1886 }
 1887 
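/*
 * Resynchronize the RX ring head.  Called when an upload-complete
 * interrupt arrived but xl_rxeof() found nothing at the current head:
 * scan the ring for a descriptor the chip has actually completed and,
 * if one is found, move the head there and return EAGAIN so the caller
 * runs xl_rxeof() again.
 */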
 1888 static int
 1889 xl_rx_resync(struct xl_softc *sc)
 1890 {
 1891         struct xl_chain_onefrag *pos;
 1892         int                     i;
 1893 
 1894         XL_LOCK_ASSERT(sc);
 1895 
 1896         pos = sc->xl_cdata.xl_rx_head;
 1897 
 1898         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 1899                 if (pos->xl_ptr->xl_status)
 1900                         break;
 1901                 pos = pos->xl_next;
 1902         }
 1903 
 1904         if (i == XL_RX_LIST_CNT)
 1905                 return (0);
 1906 
 1907         sc->xl_cdata.xl_rx_head = pos;
 1908 
 1909         return (EAGAIN);
 1910 }
 1911 
 1912 /*
 1913  * A frame has been uploaded: pass the resulting mbuf chain up to
 1914  * the higher level protocols.
 1915  */
 1916 static void
 1917 xl_rxeof(struct xl_softc *sc)
 1918 {
 1919         struct mbuf             *m;
 1920         struct ifnet            *ifp = sc->xl_ifp;
 1921         struct xl_chain_onefrag *cur_rx;
 1922         int                     total_len = 0;
 1923         u_int32_t               rxstat;
 1924 
 1925         XL_LOCK_ASSERT(sc);
 1926 again:
 1927         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
 1928             BUS_DMASYNC_POSTREAD);
 1929         while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
 1930 #ifdef DEVICE_POLLING
 1931                 if (ifp->if_capenable & IFCAP_POLLING) {
 1932                         if (sc->rxcycles <= 0)
 1933                                 break;
 1934                         sc->rxcycles--;
 1935                 }
 1936 #endif
 1937                 cur_rx = sc->xl_cdata.xl_rx_head;
 1938                 sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
 1939                 total_len = rxstat & XL_RXSTAT_LENMASK;
 1940 
 1941                 /*
 1942                  * Since we have told the chip to allow large frames,
 1943                  * we need to trap giant frame errors in software. We allow
 1944                  * a little more than the normal frame size to account for
 1945                  * frames with VLAN tags.
 1946                  */
 1947                 if (total_len > XL_MAX_FRAMELEN)
 1948                         rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
 1949 
 1950                 /*
 1951                  * If an error occurs, update stats, clear the
 1952                  * status word and leave the mbuf cluster in place:
 1953                  * it should simply get re-used next time this descriptor
 1954                  * comes up in the ring.
 1955                  */
 1956                 if (rxstat & XL_RXSTAT_UP_ERROR) {
 1957                         ifp->if_ierrors++;
 1958                         cur_rx->xl_ptr->xl_status = 0;
 1959                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1960                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1961                         continue;
 1962                 }
 1963 
 1964                 /*
 1965                  * If the error bit was not set, the upload complete
 1966                  * bit should be set which means we have a valid packet.
 1967                  * If not, something truly strange has happened.
 1968                  */
 1969                 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
 1970                         device_printf(sc->xl_dev,
 1971                             "bad receive status -- packet dropped\n");
 1972                         ifp->if_ierrors++;
 1973                         cur_rx->xl_ptr->xl_status = 0;
 1974                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1975                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1976                         continue;
 1977                 }
 1978 
 1979                 /* No errors; receive the packet. */
 1980                 bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
 1981                     BUS_DMASYNC_POSTREAD);
 1982                 m = cur_rx->xl_mbuf;
 1983 
 1984                 /*
 1985                  * Try to conjure up a new mbuf cluster. If that
 1986                  * fails, it means we have an out of memory condition and
 1987                  * should leave the buffer in place and continue. This will
 1988                  * result in a lost packet, but there's little else we
 1989                  * can do in this situation.
 1990                  */
 1991                 if (xl_newbuf(sc, cur_rx)) {
 1992                         ifp->if_ierrors++;
 1993                         cur_rx->xl_ptr->xl_status = 0;
 1994                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1995                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1996                         continue;
 1997                 }
 1998                 bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 1999                     sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 2000 
 2001                 ifp->if_ipackets++;
 2002                 m->m_pkthdr.rcvif = ifp;
 2003                 m->m_pkthdr.len = m->m_len = total_len;
 2004 
 2005                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 2006                         /* Do IP checksum checking. */
 2007                         if (rxstat & XL_RXSTAT_IPCKOK)
 2008                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2009                         if (!(rxstat & XL_RXSTAT_IPCKERR))
 2010                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2011                         if ((rxstat & XL_RXSTAT_TCPCOK &&
 2012                              !(rxstat & XL_RXSTAT_TCPCKERR)) ||
 2013                             (rxstat & XL_RXSTAT_UDPCKOK &&
 2014                              !(rxstat & XL_RXSTAT_UDPCKERR))) {
 2015                                 m->m_pkthdr.csum_flags |=
 2016                                         CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
 2017                                 m->m_pkthdr.csum_data = 0xffff;
 2018                         }
 2019                 }
 2020 
 2021                 XL_UNLOCK(sc);
 2022                 (*ifp->if_input)(ifp, m);
 2023                 XL_LOCK(sc);
 2024 
 2025                 /*
 2026                  * If we are running from the taskqueue, the interface
 2027                  * might have been stopped while we were passing the last
 2028                  * packet up the network stack.
 2029                  */
 2030                 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 2031                         return;
 2032         }
 2033 
 2034         /*
 2035          * Handle the 'end of channel' condition. When the upload
 2036          * engine hits the end of the RX ring, it will stall. This
 2037          * is our cue to flush the RX ring, reload the uplist pointer
 2038          * register and unstall the engine.
 2039          * XXX This is actually a little goofy. With the ThunderLAN
 2040          * chip, you get an interrupt when the receiver hits the end
  2041          * of the receive ring, which tells you exactly when
  2042          * you need to reload the ring pointer. Here we have to
 2043          * fake it. I'm mad at myself for not being clever enough
 2044          * to avoid the use of a goto here.
 2045          */
 2046         if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
 2047                 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
 2048                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 2049                 xl_wait(sc);
 2050                 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
 2051                 sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
 2052                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 2053                 goto again;
 2054         }
 2055 }
 2056 
 2057 /*
 2058  * Taskqueue wrapper for xl_rxeof().
 2059  */
 2060 static void
 2061 xl_rxeof_task(void *arg, int pending)
 2062 {
 2063         struct xl_softc *sc = (struct xl_softc *)arg;
 2064 
 2065         NET_LOCK_GIANT();
 2066         XL_LOCK(sc);
 2067         if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING)
 2068                 xl_rxeof(sc);
 2069         XL_UNLOCK(sc);
 2070         NET_UNLOCK_GIANT();
 2071 }
 2072 
 2073 /*
 2074  * A frame was downloaded to the chip. It's safe for us to clean up
 2075  * the list buffers.
 2076  */
 2077 static void
 2078 xl_txeof(struct xl_softc *sc)
 2079 {
 2080         struct xl_chain         *cur_tx;
 2081         struct ifnet            *ifp = sc->xl_ifp;
 2082 
 2083         XL_LOCK_ASSERT(sc);
 2084 
 2085         /*
 2086          * Go through our tx list and free mbufs for those
  2087          * frames that have been downloaded. Note: the 3c905B
 2088          * sets a special bit in the status word to let us
 2089          * know that a frame has been downloaded, but the
 2090          * original 3c900/3c905 adapters don't do that.
 2091          * Consequently, we have to use a different test if
 2092          * xl_type != XL_TYPE_905B.
 2093          */
 2094         while (sc->xl_cdata.xl_tx_head != NULL) {
 2095                 cur_tx = sc->xl_cdata.xl_tx_head;
 2096 
 2097                 if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
 2098                         break;
 2099 
 2100                 sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
 2101                 bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
 2102                     BUS_DMASYNC_POSTWRITE);
 2103                 bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
 2104                 m_freem(cur_tx->xl_mbuf);
 2105                 cur_tx->xl_mbuf = NULL;
 2106                 ifp->if_opackets++;
 2107                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2108 
 2109                 cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
 2110                 sc->xl_cdata.xl_tx_free = cur_tx;
 2111         }
 2112 
 2113         if (sc->xl_cdata.xl_tx_head == NULL) {
 2114                 sc->xl_wdog_timer = 0;
 2115                 sc->xl_cdata.xl_tx_tail = NULL;
 2116         } else {
 2117                 if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
 2118                         !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
 2119                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2120                                 sc->xl_cdata.xl_tx_head->xl_phys);
 2121                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2122                 }
 2123         }
 2124 }
 2125 
 2126 static void
 2127 xl_txeof_90xB(struct xl_softc *sc)
 2128 {
 2129         struct xl_chain         *cur_tx = NULL;
 2130         struct ifnet            *ifp = sc->xl_ifp;
 2131         int                     idx;
 2132 
 2133         XL_LOCK_ASSERT(sc);
 2134 
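        /*
         * Walk the ring from the consumer index toward the producer
         * index, reclaiming every descriptor whose download-complete
         * bit the chip has set.
         */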
 2135         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2136             BUS_DMASYNC_POSTREAD);
 2137         idx = sc->xl_cdata.xl_tx_cons;
 2138         while (idx != sc->xl_cdata.xl_tx_prod) {
 2139                 cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2140 
 2141                 if (!(le32toh(cur_tx->xl_ptr->xl_status) &
 2142                       XL_TXSTAT_DL_COMPLETE))
 2143                         break;
 2144 
 2145                 if (cur_tx->xl_mbuf != NULL) {
 2146                         bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
 2147                             BUS_DMASYNC_POSTWRITE);
 2148                         bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
 2149                         m_freem(cur_tx->xl_mbuf);
 2150                         cur_tx->xl_mbuf = NULL;
 2151                 }
 2152 
 2153                 ifp->if_opackets++;
 2154 
 2155                 sc->xl_cdata.xl_tx_cnt--;
 2156                 XL_INC(idx, XL_TX_LIST_CNT);
 2157         }
 2158 
 2159         if (sc->xl_cdata.xl_tx_cnt == 0)
 2160                 sc->xl_wdog_timer = 0;
 2161         sc->xl_cdata.xl_tx_cons = idx;
 2162 
 2163         if (cur_tx != NULL)
 2164                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2165 }
 2166 
 2167 /*
 2168  * TX 'end of channel' interrupt handler. Actually, we should
 2169  * only get a 'TX complete' interrupt if there's a transmit error,
  2170  * so this is really a TX error handler.
 2171  */
 2172 static void
 2173 xl_txeoc(struct xl_softc *sc)
 2174 {
 2175         u_int8_t                txstat;
 2176 
 2177         XL_LOCK_ASSERT(sc);
 2178 
 2179         while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
 2180                 if (txstat & XL_TXSTATUS_UNDERRUN ||
 2181                         txstat & XL_TXSTATUS_JABBER ||
 2182                         txstat & XL_TXSTATUS_RECLAIM) {
 2183                         device_printf(sc->xl_dev,
 2184                             "transmission error: %x\n", txstat);
 2185                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2186                         xl_wait(sc);
 2187                         if (sc->xl_type == XL_TYPE_905B) {
 2188                                 if (sc->xl_cdata.xl_tx_cnt) {
 2189                                         int                     i;
 2190                                         struct xl_chain         *c;
 2191 
 2192                                         i = sc->xl_cdata.xl_tx_cons;
 2193                                         c = &sc->xl_cdata.xl_tx_chain[i];
 2194                                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2195                                             c->xl_phys);
 2196                                         CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
 2197                                 }
 2198                         } else {
 2199                                 if (sc->xl_cdata.xl_tx_head != NULL)
 2200                                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2201                                             sc->xl_cdata.xl_tx_head->xl_phys);
 2202                         }
 2203                         /*
 2204                          * Remember to set this for the
 2205                          * first generation 3c90X chips.
 2206                          */
 2207                         CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
 2208                         if (txstat & XL_TXSTATUS_UNDERRUN &&
 2209                             sc->xl_tx_thresh < XL_PACKET_SIZE) {
 2210                                 sc->xl_tx_thresh += XL_MIN_FRAMELEN;
 2211                                 device_printf(sc->xl_dev,
 2212 "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh);
 2213                         }
 2214                         CSR_WRITE_2(sc, XL_COMMAND,
 2215                             XL_CMD_TX_SET_START|sc->xl_tx_thresh);
 2216                         if (sc->xl_type == XL_TYPE_905B) {
 2217                                 CSR_WRITE_2(sc, XL_COMMAND,
 2218                                 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
 2219                         }
 2220                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2221                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2222                 } else {
 2223                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2224                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2225                 }
 2226                 /*
 2227                  * Write an arbitrary byte to the TX_STATUS register
 2228                  * to clear this interrupt/error and advance to the next.
 2229                  */
 2230                 CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
 2231         }
 2232 }
 2233 
 2234 static void
 2235 xl_intr(void *arg)
 2236 {
 2237         struct xl_softc         *sc = arg;
 2238         struct ifnet            *ifp = sc->xl_ifp;
 2239         u_int16_t               status;
 2240 
 2241         XL_LOCK(sc);
 2242 
 2243 #ifdef DEVICE_POLLING
 2244         if (ifp->if_capenable & IFCAP_POLLING) {
 2245                 XL_UNLOCK(sc);
 2246                 return;
 2247         }
 2248 #endif
 2249 
 2250         while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS &&
 2251             status != 0xFFFF) {
 2252                 CSR_WRITE_2(sc, XL_COMMAND,
 2253                     XL_CMD_INTR_ACK|(status & XL_INTRS));
 2254 
 2255                 if (status & XL_STAT_UP_COMPLETE) {
 2256                         int     curpkts;
 2257 
 2258                         curpkts = ifp->if_ipackets;
 2259                         xl_rxeof(sc);
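                        /*
                         * If the chip reported an upload completion but
                         * no packet was received, the driver's notion
                         * of the ring head is stale; resynchronize it
                         * and try again.
                         */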
 2260                         if (curpkts == ifp->if_ipackets) {
 2261                                 while (xl_rx_resync(sc))
 2262                                         xl_rxeof(sc);
 2263                         }
 2264                 }
 2265 
 2266                 if (status & XL_STAT_DOWN_COMPLETE) {
 2267                         if (sc->xl_type == XL_TYPE_905B)
 2268                                 xl_txeof_90xB(sc);
 2269                         else
 2270                                 xl_txeof(sc);
 2271                 }
 2272 
 2273                 if (status & XL_STAT_TX_COMPLETE) {
 2274                         ifp->if_oerrors++;
 2275                         xl_txeoc(sc);
 2276                 }
 2277 
 2278                 if (status & XL_STAT_ADFAIL) {
 2279                         xl_reset(sc);
 2280                         xl_init_locked(sc);
 2281                 }
 2282 
 2283                 if (status & XL_STAT_STATSOFLOW) {
 2284                         sc->xl_stats_no_timeout = 1;
 2285                         xl_stats_update_locked(sc);
 2286                         sc->xl_stats_no_timeout = 0;
 2287                 }
 2288         }
 2289 
 2290         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 2291                 if (sc->xl_type == XL_TYPE_905B)
 2292                         xl_start_90xB_locked(ifp);
 2293                 else
 2294                         xl_start_locked(ifp);
 2295         }
 2296 
 2297         XL_UNLOCK(sc);
 2298 }
 2299 
 2300 #ifdef DEVICE_POLLING
 2301 static void
 2302 xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2303 {
 2304         struct xl_softc *sc = ifp->if_softc;
 2305 
 2306         XL_LOCK(sc);
 2307         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2308                 xl_poll_locked(ifp, cmd, count);
 2309         XL_UNLOCK(sc);
 2310 }
 2311 
 2312 static void
 2313 xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2314 {
 2315         struct xl_softc *sc = ifp->if_softc;
 2316 
 2317         XL_LOCK_ASSERT(sc);
 2318 
 2319         sc->rxcycles = count;
 2320         xl_rxeof(sc);
 2321         if (sc->xl_type == XL_TYPE_905B)
 2322                 xl_txeof_90xB(sc);
 2323         else
 2324                 xl_txeof(sc);
 2325 
 2326         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 2327                 if (sc->xl_type == XL_TYPE_905B)
 2328                         xl_start_90xB_locked(ifp);
 2329                 else
 2330                         xl_start_locked(ifp);
 2331         }
 2332 
 2333         if (cmd == POLL_AND_CHECK_STATUS) {
 2334                 u_int16_t status;
 2335 
 2336                 status = CSR_READ_2(sc, XL_STATUS);
 2337                 if (status & XL_INTRS && status != 0xFFFF) {
 2338                         CSR_WRITE_2(sc, XL_COMMAND,
 2339                             XL_CMD_INTR_ACK|(status & XL_INTRS));
 2340 
 2341                         if (status & XL_STAT_TX_COMPLETE) {
 2342                                 ifp->if_oerrors++;
 2343                                 xl_txeoc(sc);
 2344                         }
 2345 
 2346                         if (status & XL_STAT_ADFAIL) {
 2347                                 xl_reset(sc);
 2348                                 xl_init_locked(sc);
 2349                         }
 2350 
 2351                         if (status & XL_STAT_STATSOFLOW) {
 2352                                 sc->xl_stats_no_timeout = 1;
 2353                                 xl_stats_update_locked(sc);
 2354                                 sc->xl_stats_no_timeout = 0;
 2355                         }
 2356                 }
 2357         }
 2358 }
 2359 #endif /* DEVICE_POLLING */
 2360 
 2361 /*
  2362  * XXX: Callout entry point; runs with the driver lock held (callout_init_mtx).
 2363  */
 2364 static void
 2365 xl_stats_update(void *xsc)
 2366 {
 2367         struct xl_softc *sc = xsc;
 2368 
 2369         XL_LOCK_ASSERT(sc);
 2370 
 2371         if (xl_watchdog(sc) == EJUSTRETURN)
 2372                 return;
 2373 
 2374         xl_stats_update_locked(sc);
 2375 }
 2376 
 2377 static void
 2378 xl_stats_update_locked(struct xl_softc *sc)
 2379 {
 2380         struct ifnet            *ifp = sc->xl_ifp;
 2381         struct xl_stats         xl_stats;
 2382         u_int8_t                *p;
 2383         int                     i;
 2384         struct mii_data         *mii = NULL;
 2385 
 2386         XL_LOCK_ASSERT(sc);
 2387 
 2388         bzero((char *)&xl_stats, sizeof(struct xl_stats));
 2389 
 2390         if (sc->xl_miibus != NULL)
 2391                 mii = device_get_softc(sc->xl_miibus);
 2392 
 2393         p = (u_int8_t *)&xl_stats;
 2394 
 2395         /* Read all the stats registers. */
 2396         XL_SEL_WIN(6);
 2397 
 2398         for (i = 0; i < 16; i++)
 2399                 *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
 2400 
 2401         ifp->if_ierrors += xl_stats.xl_rx_overrun;
 2402 
 2403         ifp->if_collisions += xl_stats.xl_tx_multi_collision +
 2404             xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision;
 2405 
 2406         /*
 2407          * Boomerang and cyclone chips have an extra stats counter
 2408          * in window 4 (BadSSD). We have to read this too in order
 2409          * to clear out all the stats registers and avoid a statsoflow
 2410          * interrupt.
 2411          */
 2412         XL_SEL_WIN(4);
 2413         CSR_READ_1(sc, XL_W4_BADSSD);
 2414 
 2415         if ((mii != NULL) && (!sc->xl_stats_no_timeout))
 2416                 mii_tick(mii);
 2417 
 2418         XL_SEL_WIN(7);
 2419 
 2420         if (!sc->xl_stats_no_timeout)
 2421                 callout_reset(&sc->xl_stat_callout, hz, xl_stats_update, sc);
 2422 }
 2423 
 2424 /*
 2425  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 2426  * pointers to the fragment pointers.
 2427  */
 2428 static int
 2429 xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf **m_head)
 2430 {
 2431         struct mbuf             *m_new;
 2432         struct ifnet            *ifp = sc->xl_ifp;
 2433         int                     error, i, nseg, total_len;
 2434         u_int32_t               status;
 2435 
 2436         XL_LOCK_ASSERT(sc);
 2437 
 2438         error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head,
 2439             sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
 2440 
 2441         if (error && error != EFBIG) {
 2442                 if_printf(ifp, "can't map mbuf (error %d)\n", error);
 2443                 return (error);
 2444         }
 2445 
 2446         /*
 2447          * Handle special case: we used up all 63 fragments,
 2448          * but we have more mbufs left in the chain. Copy the
 2449          * data into an mbuf cluster. Note that we don't
 2450          * bother clearing the values in the other fragment
 2451          * pointers/counters; it wouldn't gain us anything,
 2452          * and would waste cycles.
 2453          */
 2454         if (error) {
 2455                 m_new = m_defrag(*m_head, M_DONTWAIT);
 2456                 if (m_new == NULL) {
 2457                         m_freem(*m_head);
 2458                         *m_head = NULL;
 2459                         return (ENOBUFS);
 2460                 }
 2461                 *m_head = m_new;
 2462 
 2463                 error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map,
 2464                     *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT);
 2465                 if (error) {
 2466                         m_freem(*m_head);
 2467                         *m_head = NULL;
 2468                         if_printf(ifp, "can't map mbuf (error %d)\n", error);
 2469                         return (error);
 2470                 }
 2471         }
 2472 
 2473         KASSERT(nseg <= XL_MAXFRAGS,
 2474             ("%s: too many DMA segments (%d)", __func__, nseg));
 2475         if (nseg == 0) {
 2476                 m_freem(*m_head);
 2477                 *m_head = NULL;
 2478                 return (EIO);
 2479         }
 2480 
 2481         total_len = 0;
 2482         for (i = 0; i < nseg; i++) {
 2483                 KASSERT(sc->xl_cdata.xl_tx_segs[i].ds_len <= MCLBYTES,
 2484                     ("segment size too large"));
 2485                 c->xl_ptr->xl_frag[i].xl_addr =
 2486                     htole32(sc->xl_cdata.xl_tx_segs[i].ds_addr);
 2487                 c->xl_ptr->xl_frag[i].xl_len =
 2488                     htole32(sc->xl_cdata.xl_tx_segs[i].ds_len);
 2489                 total_len += sc->xl_cdata.xl_tx_segs[i].ds_len;
 2490         }
 2491         c->xl_ptr->xl_frag[nseg - 1].xl_len =
 2492             htole32(sc->xl_cdata.xl_tx_segs[nseg - 1].ds_len | XL_LAST_FRAG);
 2493         c->xl_ptr->xl_status = htole32(total_len);
 2494         c->xl_ptr->xl_next = 0;
 2495 
 2496         if (sc->xl_type == XL_TYPE_905B) {
 2497                 status = XL_TXSTAT_RND_DEFEAT;
 2498 
 2499 #ifndef XL905B_TXCSUM_BROKEN
  2500                 if ((*m_head)->m_pkthdr.csum_flags) {
  2501                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
  2502                                 status |= XL_TXSTAT_IPCKSUM;
  2503                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
  2504                                 status |= XL_TXSTAT_TCPCKSUM;
  2505                         if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
 2506                                 status |= XL_TXSTAT_UDPCKSUM;
 2507                 }
 2508 #endif
 2509                 c->xl_ptr->xl_status = htole32(status);
 2510         }
 2511 
 2512         c->xl_mbuf = *m_head;
 2513         bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
 2514         return (0);
 2515 }
 2516 
 2517 /*
 2518  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 2519  * to the mbuf data regions directly in the transmit lists. We also save a
 2520  * copy of the pointers since the transmit list fragment pointers are
 2521  * physical addresses.
 2522  */
 2523 
 2524 static void
 2525 xl_start(struct ifnet *ifp)
 2526 {
 2527         struct xl_softc         *sc = ifp->if_softc;
 2528 
 2529         XL_LOCK(sc);
 2530 
 2531         if (sc->xl_type == XL_TYPE_905B)
 2532                 xl_start_90xB_locked(ifp);
 2533         else
 2534                 xl_start_locked(ifp);
 2535 
 2536         XL_UNLOCK(sc);
 2537 }
 2538 
 2539 static void
 2540 xl_start_locked(struct ifnet *ifp)
 2541 {
 2542         struct xl_softc         *sc = ifp->if_softc;
 2543         struct mbuf             *m_head = NULL;
 2544         struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
 2545         u_int32_t               status;
 2546         int                     error;
 2547 
 2548         XL_LOCK_ASSERT(sc);
 2549 
 2550         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2551             IFF_DRV_RUNNING)
 2552                 return;
 2553         /*
 2554          * Check for an available queue slot. If there are none,
 2555          * punt.
 2556          */
 2557         if (sc->xl_cdata.xl_tx_free == NULL) {
 2558                 xl_txeoc(sc);
 2559                 xl_txeof(sc);
 2560                 if (sc->xl_cdata.xl_tx_free == NULL) {
 2561                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2562                         return;
 2563                 }
 2564         }
 2565 
 2566         start_tx = sc->xl_cdata.xl_tx_free;
 2567 
 2568         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2569             sc->xl_cdata.xl_tx_free != NULL;) {
 2570                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2571                 if (m_head == NULL)
 2572                         break;
 2573 
 2574                 /* Pick a descriptor off the free list. */
 2575                 cur_tx = sc->xl_cdata.xl_tx_free;
 2576 
 2577                 /* Pack the data into the descriptor. */
 2578                 error = xl_encap(sc, cur_tx, &m_head);
 2579                 if (error) {
 2580                         if (m_head == NULL)
 2581                                 break;
 2582                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2583                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2584                         break;
 2585                 }
 2586 
 2587                 sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
 2588                 cur_tx->xl_next = NULL;
 2589 
 2590                 /* Chain it together. */
 2591                 if (prev != NULL) {
 2592                         prev->xl_next = cur_tx;
 2593                         prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
 2594                 }
 2595                 prev = cur_tx;
 2596 
 2597                 /*
 2598                  * If there's a BPF listener, bounce a copy of this frame
 2599                  * to him.
 2600                  */
 2601                 BPF_MTAP(ifp, cur_tx->xl_mbuf);
 2602         }
 2603 
 2604         /*
 2605          * If there are no packets queued, bail.
 2606          */
 2607         if (cur_tx == NULL)
 2608                 return;
 2609 
 2610         /*
  2611          * Place the request for the download interrupt
 2612          * in the last descriptor in the chain. This way, if
 2613          * we're chaining several packets at once, we'll only
 2614          * get an interrupt once for the whole chain rather than
 2615          * once for each packet.
 2616          */
 2617         cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
 2618             XL_TXSTAT_DL_INTR);
 2619         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2620             BUS_DMASYNC_PREWRITE);
 2621 
 2622         /*
 2623          * Queue the packets. If the TX channel is clear, update
 2624          * the downlist pointer register.
 2625          */
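        /*
         * Stall the download engine while the new chain is spliced onto
         * the tail of the current list, then unstall it below; this
         * keeps the chip from walking the list while we modify it.
         */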
 2626         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
 2627         xl_wait(sc);
 2628 
 2629         if (sc->xl_cdata.xl_tx_head != NULL) {
 2630                 sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
 2631                 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
 2632                     htole32(start_tx->xl_phys);
 2633                 status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
 2634                 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
 2635                     htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
 2636                 sc->xl_cdata.xl_tx_tail = cur_tx;
 2637         } else {
 2638                 sc->xl_cdata.xl_tx_head = start_tx;
 2639                 sc->xl_cdata.xl_tx_tail = cur_tx;
 2640         }
 2641         if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
 2642                 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);
 2643 
 2644         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2645 
 2646         XL_SEL_WIN(7);
 2647 
 2648         /*
 2649          * Set a timeout in case the chip goes out to lunch.
 2650          */
 2651         sc->xl_wdog_timer = 5;
 2652 
 2653         /*
 2654          * XXX Under certain conditions, usually on slower machines
 2655          * where interrupts may be dropped, it's possible for the
 2656          * adapter to chew up all the buffers in the receive ring
 2657          * and stall, without us being able to do anything about it.
 2658          * To guard against this, we need to make a pass over the
 2659          * RX queue to make sure there aren't any packets pending.
 2660          * Doing it here means we can flush the receive ring at the
 2661          * same time the chip is DMAing the transmit descriptors we
 2662          * just gave it.
 2663          *
 2664          * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
 2665          * nature of their chips in all their marketing literature;
 2666          * we may as well take advantage of it. :)
 2667          */
 2668         taskqueue_enqueue(taskqueue_swi, &sc->xl_task);
 2669 }
 2670 
 2671 static void
 2672 xl_start_90xB_locked(struct ifnet *ifp)
 2673 {
 2674         struct xl_softc         *sc = ifp->if_softc;
 2675         struct mbuf             *m_head = NULL;
 2676         struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
 2677         int                     error, idx;
 2678 
 2679         XL_LOCK_ASSERT(sc);
 2680 
 2681         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2682             IFF_DRV_RUNNING)
 2683                 return;
 2684 
 2685         idx = sc->xl_cdata.xl_tx_prod;
 2686         start_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2687 
 2688         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2689             sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL;) {
 2690                 if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
 2691                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2692                         break;
 2693                 }
 2694 
 2695                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2696                 if (m_head == NULL)
 2697                         break;
 2698 
 2699                 cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2700 
 2701                 /* Pack the data into the descriptor. */
 2702                 error = xl_encap(sc, cur_tx, &m_head);
 2703                 if (error) {
 2704                         if (m_head == NULL)
 2705                                 break;
 2706                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2707                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2708                         break;
 2709                 }
 2710 
 2711                 /* Chain it together. */
 2712                 if (prev != NULL)
 2713                         prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
 2714                 prev = cur_tx;
 2715 
 2716                 /*
 2717                  * If there's a BPF listener, bounce a copy of this frame
  2718                  * to it.
 2719                  */
 2720                 BPF_MTAP(ifp, cur_tx->xl_mbuf);
 2721 
 2722                 XL_INC(idx, XL_TX_LIST_CNT);
 2723                 sc->xl_cdata.xl_tx_cnt++;
 2724         }
 2725 
 2726         /*
 2727          * If there are no packets queued, bail.
 2728          */
 2729         if (cur_tx == NULL)
 2730                 return;
 2731 
 2732         /*
  2733          * Place the request for the download interrupt
 2734          * in the last descriptor in the chain. This way, if
 2735          * we're chaining several packets at once, we'll only
 2736          * get an interrupt once for the whole chain rather than
 2737          * once for each packet.
 2738          */
 2739         cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
 2740             XL_TXSTAT_DL_INTR);
 2741         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2742             BUS_DMASYNC_PREWRITE);
 2743 
 2744         /* Start transmission */
 2745         sc->xl_cdata.xl_tx_prod = idx;
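               /*
                * Linking the new chain into the previous descriptor's next
                * pointer is all that is needed here: xl_init_locked() leaves
                * the 90xB download engine in polling mode (XL_DOWN_POLL), so
                * no stall/unstall cycle is required on this path.
                */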
 2746         start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
 2747 
 2748         /*
 2749          * Set a timeout in case the chip goes out to lunch.
 2750          */
 2751         sc->xl_wdog_timer = 5;
 2752 }
 2753 
 2754 static void
 2755 xl_init(void *xsc)
 2756 {
 2757         struct xl_softc         *sc = xsc;
 2758 
 2759         XL_LOCK(sc);
 2760         xl_init_locked(sc);
 2761         XL_UNLOCK(sc);
 2762 }
 2763 
 2764 static void
 2765 xl_init_locked(struct xl_softc *sc)
 2766 {
 2767         struct ifnet            *ifp = sc->xl_ifp;
 2768         int                     error, i;
 2769         u_int16_t               rxfilt = 0;
 2770         struct mii_data         *mii = NULL;
 2771 
 2772         XL_LOCK_ASSERT(sc);
 2773 
 2774         /*
 2775          * Cancel pending I/O and free all RX/TX buffers.
 2776          */
 2777         xl_stop(sc);
 2778 
 2779         if (sc->xl_miibus == NULL) {
 2780                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 2781                 xl_wait(sc);
 2782         }
 2783         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2784         xl_wait(sc);
 2785         DELAY(10000);
 2786 
 2787         if (sc->xl_miibus != NULL)
 2788                 mii = device_get_softc(sc->xl_miibus);
 2789 
 2790         /* Init our MAC address */
 2791         XL_SEL_WIN(2);
 2792         for (i = 0; i < ETHER_ADDR_LEN; i++) {
 2793                 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
 2794                                 IFP2ENADDR(sc->xl_ifp)[i]);
 2795         }
 2796 
 2797         /* Clear the station mask. */
 2798         for (i = 0; i < 3; i++)
 2799                 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
 2800 #ifdef notdef
 2801         /* Reset TX and RX. */
 2802         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 2803         xl_wait(sc);
 2804         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2805         xl_wait(sc);
 2806 #endif
 2807         /* Init circular RX list. */
 2808         error = xl_list_rx_init(sc);
 2809         if (error) {
 2810                 device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n",
 2811                     error);
 2812                 xl_stop(sc);
 2813                 return;
 2814         }
 2815 
 2816         /* Init TX descriptors. */
 2817         if (sc->xl_type == XL_TYPE_905B)
 2818                 error = xl_list_tx_init_90xB(sc);
 2819         else
 2820                 error = xl_list_tx_init(sc);
 2821         if (error) {
 2822                 device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n",
 2823                     error);
 2824                 xl_stop(sc);
 2825                 return;
 2826         }
 2827 
 2828         /*
 2829          * Set the TX freethresh value.
 2830          * Note that this has no effect on 3c905B "cyclone"
 2831          * cards but is required for 3c900/3c905 "boomerang"
 2832          * cards in order to enable the download engine.
 2833          */
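               /*
                * The shift by 8 presumably scales the byte count into the
                * register's 256-byte units (XL_PACKET_SIZE / 256).
                */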
 2834         CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
 2835 
 2836         /* Set the TX start threshold for best performance. */
 2837         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
 2838 
 2839         /*
 2840          * If this is a 3c905B, also set the tx reclaim threshold.
 2841          * This helps cut down on the number of tx reclaim errors
 2842          * that could happen on a busy network. The chip multiplies
 2843          * the register value by 16 to obtain the actual threshold
 2844          * in bytes, so we divide by 16 when setting the value here.
 2845          * The existing threshold value can be examined by reading
 2846          * the register at offset 9 in window 5.
 2847          */
 2848         if (sc->xl_type == XL_TYPE_905B) {
 2849                 CSR_WRITE_2(sc, XL_COMMAND,
 2850                     XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
 2851         }
 2852 
 2853         /* Set RX filter bits. */
 2854         XL_SEL_WIN(5);
 2855         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
 2856 
 2857         /* Set the individual bit to receive frames for this host only. */
 2858         rxfilt |= XL_RXFILTER_INDIVIDUAL;
 2859 
 2860         /* If we want promiscuous mode, set the allframes bit. */
 2861         if (ifp->if_flags & IFF_PROMISC) {
 2862                 rxfilt |= XL_RXFILTER_ALLFRAMES;
 2863                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2864         } else {
 2865                 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
 2866                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2867         }
 2868 
 2869         /*
 2870          * Set capture broadcast bit to capture broadcast frames.
 2871          */
 2872         if (ifp->if_flags & IFF_BROADCAST) {
 2873                 rxfilt |= XL_RXFILTER_BROADCAST;
 2874                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2875         } else {
 2876                 rxfilt &= ~XL_RXFILTER_BROADCAST;
 2877                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2878         }
 2879 
 2880         /*
 2881          * Program the multicast filter, if necessary.
 2882          */
 2883         if (sc->xl_type == XL_TYPE_905B)
 2884                 xl_setmulti_hash(sc);
 2885         else
 2886                 xl_setmulti(sc);
 2887 
 2888         /*
 2889          * Load the address of the RX list. We have to
 2890          * stall the upload engine before we can manipulate
 2891          * the uplist pointer register, then unstall it when
 2892          * we're finished. We also have to wait for the
 2893          * stall command to complete before proceeding.
 2894          * Note that we have to do this after any RX resets
 2895          * have completed since the uplist register is cleared
 2896          * by a reset.
 2897          */
 2898         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 2899         xl_wait(sc);
 2900         CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
 2901         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 2902         xl_wait(sc);
 2903 
 2904         if (sc->xl_type == XL_TYPE_905B) {
 2905                 /* Set polling interval */
 2906                 CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
 2907                 /* Load the address of the TX list */
 2908                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
 2909                 xl_wait(sc);
 2910                 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2911                     sc->xl_cdata.xl_tx_chain[0].xl_phys);
 2912                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2913                 xl_wait(sc);
 2914         }
 2915 
 2916         /*
 2917          * If the coax transceiver is on, make sure to enable
 2918          * the DC-DC converter.
 2919          */
 2920         XL_SEL_WIN(3);
 2921         if (sc->xl_xcvr == XL_XCVR_COAX)
 2922                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
 2923         else
 2924                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
 2925 
 2926         /*
  2927          * Increase packet size to allow reception of 802.1q or ISL packets.
 2928          * For the 3c90x chip, set the 'allow large packets' bit in the MAC
 2929          * control register. For 3c90xB/C chips, use the RX packet size
 2930          * register.
 2931          */
 2932 
 2933         if (sc->xl_type == XL_TYPE_905B)
 2934                 CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
 2935         else {
 2936                 u_int8_t macctl;
 2937                 macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
 2938                 macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
 2939                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
 2940         }
 2941 
 2942         /* Clear out the stats counters. */
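               /*
                * xl_stats_update_locked() is invoked once by hand to drain
                * the counters; xl_stats_no_timeout presumably keeps it from
                * rescheduling its own callout while we do so.
                */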
 2943         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
 2944         sc->xl_stats_no_timeout = 1;
 2945         xl_stats_update_locked(sc);
 2946         sc->xl_stats_no_timeout = 0;
 2947         XL_SEL_WIN(4);
 2948         CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
 2949         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
 2950 
 2951         /*
 2952          * Enable interrupts.
 2953          */
 2954         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
 2955         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
 2956 #ifdef DEVICE_POLLING
 2957         /* Disable interrupts if we are polling. */
 2958         if (ifp->if_capenable & IFCAP_POLLING)
 2959                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 2960         else
 2961 #endif
 2962         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
 2963         if (sc->xl_flags & XL_FLAG_FUNCREG)
 2964             bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
 2965 
 2966         /* Set the RX early threshold */
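               /*
                * XL_PACKET_SIZE >> 2 places the early-receive threshold at a
                * quarter of a full-sized frame; XL_DMACTL_UP_RX_EARLY then
                * enables early-receive handling in the DMA control register.
                */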
  2967         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >> 2));
 2968         CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
 2969 
 2970         /* Enable receiver and transmitter. */
 2971         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2972         xl_wait(sc);
 2973         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
 2974         xl_wait(sc);
 2975 
 2976         /* XXX Downcall to miibus. */
 2977         if (mii != NULL)
 2978                 mii_mediachg(mii);
 2979 
 2980         /* Select window 7 for normal operations. */
 2981         XL_SEL_WIN(7);
 2982 
 2983         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 2984         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2985 
 2986         sc->xl_wdog_timer = 0;
 2987         callout_reset(&sc->xl_stat_callout, hz, xl_stats_update, sc);
 2988 }
 2989 
 2990 /*
 2991  * Set media options.
 2992  */
 2993 static int
 2994 xl_ifmedia_upd(struct ifnet *ifp)
 2995 {
 2996         struct xl_softc         *sc = ifp->if_softc;
 2997         struct ifmedia          *ifm = NULL;
 2998         struct mii_data         *mii = NULL;
 2999 
 3000         XL_LOCK(sc);
 3001 
 3002         if (sc->xl_miibus != NULL)
 3003                 mii = device_get_softc(sc->xl_miibus);
 3004         if (mii == NULL)
 3005                 ifm = &sc->ifmedia;
 3006         else
 3007                 ifm = &mii->mii_media;
 3008 
 3009         switch (IFM_SUBTYPE(ifm->ifm_media)) {
 3010         case IFM_100_FX:
 3011         case IFM_10_FL:
 3012         case IFM_10_2:
 3013         case IFM_10_5:
 3014                 xl_setmode(sc, ifm->ifm_media);
 3015                 XL_UNLOCK(sc);
 3016                 return (0);
 3017         }
 3018 
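               /*
                * For MII-attached media (MII, BTX, BT4) a full reinit lets
                * the miibus code program the PHY via mii_mediachg(); all
                * other media are programmed directly with xl_setmode().
                */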
 3019         if (sc->xl_media & XL_MEDIAOPT_MII ||
 3020             sc->xl_media & XL_MEDIAOPT_BTX ||
 3021             sc->xl_media & XL_MEDIAOPT_BT4) {
 3022                 xl_init_locked(sc);
 3023         } else {
 3024                 xl_setmode(sc, ifm->ifm_media);
 3025         }
 3026 
 3027         XL_UNLOCK(sc);
 3028 
 3029         return (0);
 3030 }
 3031 
 3032 /*
 3033  * Report current media status.
 3034  */
 3035 static void
 3036 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 3037 {
 3038         struct xl_softc         *sc = ifp->if_softc;
 3039         u_int32_t               icfg;
 3040         u_int16_t               status = 0;
 3041         struct mii_data         *mii = NULL;
 3042 
 3043         XL_LOCK(sc);
 3044 
 3045         if (sc->xl_miibus != NULL)
 3046                 mii = device_get_softc(sc->xl_miibus);
 3047 
 3048         XL_SEL_WIN(4);
 3049         status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
 3050 
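               /*
                * The connector currently selected by the chip is encoded in
                * the internal config register (window 3); it determines how
                * the active media is reported below.
                */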
 3051         XL_SEL_WIN(3);
 3052         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
 3053         icfg >>= XL_ICFG_CONNECTOR_BITS;
 3054 
 3055         ifmr->ifm_active = IFM_ETHER;
 3056         ifmr->ifm_status = IFM_AVALID;
 3057 
 3058         if ((status & XL_MEDIASTAT_CARRIER) == 0)
 3059                 ifmr->ifm_status |= IFM_ACTIVE;
 3060 
 3061         switch (icfg) {
 3062         case XL_XCVR_10BT:
 3063                 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
 3064                 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
 3065                         ifmr->ifm_active |= IFM_FDX;
 3066                 else
 3067                         ifmr->ifm_active |= IFM_HDX;
 3068                 break;
 3069         case XL_XCVR_AUI:
 3070                 if (sc->xl_type == XL_TYPE_905B &&
 3071                     sc->xl_media == XL_MEDIAOPT_10FL) {
 3072                         ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
 3073                         if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
 3074                                 ifmr->ifm_active |= IFM_FDX;
 3075                         else
 3076                                 ifmr->ifm_active |= IFM_HDX;
 3077                 } else
 3078                         ifmr->ifm_active = IFM_ETHER|IFM_10_5;
 3079                 break;
 3080         case XL_XCVR_COAX:
 3081                 ifmr->ifm_active = IFM_ETHER|IFM_10_2;
 3082                 break;
 3083         /*
 3084          * XXX MII and BTX/AUTO should be separate cases.
 3085          */
 3086 
 3087         case XL_XCVR_100BTX:
 3088         case XL_XCVR_AUTO:
 3089         case XL_XCVR_MII:
 3090                 if (mii != NULL) {
 3091                         mii_pollstat(mii);
 3092                         ifmr->ifm_active = mii->mii_media_active;
 3093                         ifmr->ifm_status = mii->mii_media_status;
 3094                 }
 3095                 break;
 3096         case XL_XCVR_100BFX:
 3097                 ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
 3098                 break;
 3099         default:
 3100                 if_printf(ifp, "unknown XCVR type: %d\n", icfg);
 3101                 break;
 3102         }
 3103 
 3104         XL_UNLOCK(sc);
 3105 }
 3106 
 3107 static int
 3108 xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 3109 {
 3110         struct xl_softc         *sc = ifp->if_softc;
 3111         struct ifreq            *ifr = (struct ifreq *) data;
 3112         int                     error = 0;
 3113         struct mii_data         *mii = NULL;
 3114         u_int8_t                rxfilt;
 3115 
 3116         switch (command) {
 3117         case SIOCSIFFLAGS:
 3118                 XL_LOCK(sc);
 3119 
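                       /*
                        * If only the PROMISC flag changed while the interface
                        * is running, just toggle the ALLFRAMES bit in the RX
                        * filter rather than doing a full reinitialization.
                        */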
 3120                 XL_SEL_WIN(5);
 3121                 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
 3122                 if (ifp->if_flags & IFF_UP) {
 3123                         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3124                             ifp->if_flags & IFF_PROMISC &&
 3125                             !(sc->xl_if_flags & IFF_PROMISC)) {
 3126                                 rxfilt |= XL_RXFILTER_ALLFRAMES;
 3127                                 CSR_WRITE_2(sc, XL_COMMAND,
 3128                                     XL_CMD_RX_SET_FILT|rxfilt);
 3129                                 XL_SEL_WIN(7);
 3130                         } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3131                             !(ifp->if_flags & IFF_PROMISC) &&
 3132                             sc->xl_if_flags & IFF_PROMISC) {
 3133                                 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
 3134                                 CSR_WRITE_2(sc, XL_COMMAND,
 3135                                     XL_CMD_RX_SET_FILT|rxfilt);
 3136                                 XL_SEL_WIN(7);
 3137                         } else {
 3138                                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 3139                                         xl_init_locked(sc);
 3140                         }
 3141                 } else {
 3142                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3143                                 xl_stop(sc);
 3144                 }
 3145                 sc->xl_if_flags = ifp->if_flags;
 3146                 XL_UNLOCK(sc);
 3147                 error = 0;
 3148                 break;
 3149         case SIOCADDMULTI:
 3150         case SIOCDELMULTI:
 3151                 /* XXX Downcall from if_addmulti() possibly with locks held. */
 3152                 XL_LOCK(sc);
 3153                 if (sc->xl_type == XL_TYPE_905B)
 3154                         xl_setmulti_hash(sc);
 3155                 else
 3156                         xl_setmulti(sc);
 3157                 XL_UNLOCK(sc);
 3158                 error = 0;
 3159                 break;
 3160         case SIOCGIFMEDIA:
 3161         case SIOCSIFMEDIA:
 3162                 if (sc->xl_miibus != NULL)
 3163                         mii = device_get_softc(sc->xl_miibus);
 3164                 if (mii == NULL)
 3165                         error = ifmedia_ioctl(ifp, ifr,
 3166                             &sc->ifmedia, command);
 3167                 else
 3168                         error = ifmedia_ioctl(ifp, ifr,
 3169                             &mii->mii_media, command);
 3170                 break;
 3171         case SIOCSIFCAP:
 3172 #ifdef DEVICE_POLLING
 3173                 if (ifr->ifr_reqcap & IFCAP_POLLING &&
 3174                     !(ifp->if_capenable & IFCAP_POLLING)) {
 3175                         error = ether_poll_register(xl_poll, ifp);
 3176                         if (error)
  3177                                 return (error);
 3178                         XL_LOCK(sc);
 3179                         /* Disable interrupts */
 3180                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 3181                         ifp->if_capenable |= IFCAP_POLLING;
 3182                         XL_UNLOCK(sc);
 3183                         return (error);
 3184                 }
 3185                 if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
 3186                     ifp->if_capenable & IFCAP_POLLING) {
 3187                         error = ether_poll_deregister(ifp);
 3188                         /* Enable interrupts. */
 3189                         XL_LOCK(sc);
 3190                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
 3191                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
 3192                         if (sc->xl_flags & XL_FLAG_FUNCREG)
 3193                                 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle,
 3194                                     4, 0x8000);
 3195                         ifp->if_capenable &= ~IFCAP_POLLING;
 3196                         XL_UNLOCK(sc);
 3197                         return (error);
 3198                 }
 3199 #endif /* DEVICE_POLLING */
 3200                 XL_LOCK(sc);
 3201                 ifp->if_capenable = ifr->ifr_reqcap;
 3202                 if (ifp->if_capenable & IFCAP_TXCSUM)
 3203                         ifp->if_hwassist = XL905B_CSUM_FEATURES;
 3204                 else
 3205                         ifp->if_hwassist = 0;
 3206                 XL_UNLOCK(sc);
 3207                 break;
 3208         default:
 3209                 error = ether_ioctl(ifp, command, data);
 3210                 break;
 3211         }
 3212 
 3213         return (error);
 3214 }
 3215 
 3216 static int
 3217 xl_watchdog(struct xl_softc *sc)
 3218 {
 3219         struct ifnet            *ifp = sc->xl_ifp;
 3220         u_int16_t               status = 0;
 3221         int                     misintr;
 3222 
 3223         XL_LOCK_ASSERT(sc);
 3224 
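               /*
                * Nothing to do unless a transmit timeout is pending: bail
                * out if the timer is idle (zero) or has not yet counted
                * down to zero on this tick.
                */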
 3225         if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0)
 3226                 return (0);
 3227 
 3228         xl_rxeof(sc);
 3229         xl_txeoc(sc);
 3230         misintr = 0;
 3231         if (sc->xl_type == XL_TYPE_905B) {
 3232                 xl_txeof_90xB(sc);
 3233                 if (sc->xl_cdata.xl_tx_cnt == 0)
 3234                         misintr++;
 3235         } else {
 3236                 xl_txeof(sc);
 3237                 if (sc->xl_cdata.xl_tx_head == NULL)
 3238                         misintr++;
 3239         }
 3240         if (misintr != 0) {
 3241                 device_printf(sc->xl_dev,
 3242                     "watchdog timeout (missed Tx interrupts) -- recovering\n");
 3243                 return (0);
 3244         }
 3245 
 3246         ifp->if_oerrors++;
 3247         XL_SEL_WIN(4);
 3248         status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
 3249         device_printf(sc->xl_dev, "watchdog timeout\n");
 3250 
 3251         if (status & XL_MEDIASTAT_CARRIER)
 3252                 device_printf(sc->xl_dev,
 3253                     "no carrier - transceiver cable problem?\n");
 3254 
 3255         xl_reset(sc);
 3256         xl_init_locked(sc);
 3257 
 3258         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 3259                 if (sc->xl_type == XL_TYPE_905B)
 3260                         xl_start_90xB_locked(ifp);
 3261                 else
 3262                         xl_start_locked(ifp);
 3263         }
 3264 
 3265         return (EJUSTRETURN);
 3266 }
 3267 
 3268 /*
 3269  * Stop the adapter and free any mbufs allocated to the
 3270  * RX and TX lists.
 3271  */
 3272 static void
 3273 xl_stop(struct xl_softc *sc)
 3274 {
 3275         register int            i;
 3276         struct ifnet            *ifp = sc->xl_ifp;
 3277 
 3278         XL_LOCK_ASSERT(sc);
 3279 
 3280         sc->xl_wdog_timer = 0;
 3281 
 3282         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
 3283         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
 3284         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
 3285         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
 3286         xl_wait(sc);
 3287         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
 3288         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
 3289         DELAY(800);
 3290 
 3291 #ifdef foo
 3292         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 3293         xl_wait(sc);
 3294         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 3295         xl_wait(sc);
 3296 #endif
 3297 
 3298         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
 3299         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
 3300         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 3301         if (sc->xl_flags & XL_FLAG_FUNCREG)
 3302                 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
 3303 
 3304         /* Stop the stats updater. */
 3305         callout_stop(&sc->xl_stat_callout);
 3306 
 3307         /*
 3308          * Free data in the RX lists.
 3309          */
 3310         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 3311                 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
 3312                         bus_dmamap_unload(sc->xl_mtag,
 3313                             sc->xl_cdata.xl_rx_chain[i].xl_map);
 3314                         bus_dmamap_destroy(sc->xl_mtag,
 3315                             sc->xl_cdata.xl_rx_chain[i].xl_map);
 3316                         m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
 3317                         sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
 3318                 }
 3319         }
 3320         if (sc->xl_ldata.xl_rx_list != NULL)
 3321                 bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
 3322         /*
 3323          * Free the TX list buffers.
 3324          */
 3325         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 3326                 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
 3327                         bus_dmamap_unload(sc->xl_mtag,
 3328                             sc->xl_cdata.xl_tx_chain[i].xl_map);
 3329                         bus_dmamap_destroy(sc->xl_mtag,
 3330                             sc->xl_cdata.xl_tx_chain[i].xl_map);
 3331                         m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
 3332                         sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
 3333                 }
 3334         }
 3335         if (sc->xl_ldata.xl_tx_list != NULL)
 3336                 bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);
 3337 
 3338         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 3339 }
 3340 
 3341 /*
 3342  * Stop all chip I/O so that the kernel's probe routines don't
 3343  * get confused by errant DMAs when rebooting.
 3344  */
 3345 static int
 3346 xl_shutdown(device_t dev)
 3347 {
 3348         struct xl_softc         *sc;
 3349 
 3350         sc = device_get_softc(dev);
 3351 
 3352         XL_LOCK(sc);
 3353         xl_reset(sc);
 3354         xl_stop(sc);
 3355         XL_UNLOCK(sc);
 3356 
 3357         return (0);
 3358 }
 3359 
 3360 static int
 3361 xl_suspend(device_t dev)
 3362 {
 3363         struct xl_softc         *sc;
 3364 
 3365         sc = device_get_softc(dev);
 3366 
 3367         XL_LOCK(sc);
 3368         xl_stop(sc);
 3369         XL_UNLOCK(sc);
 3370 
 3371         return (0);
 3372 }
 3373 
 3374 static int
 3375 xl_resume(device_t dev)
 3376 {
 3377         struct xl_softc         *sc;
 3378         struct ifnet            *ifp;
 3379 
 3380         sc = device_get_softc(dev);
 3381         ifp = sc->xl_ifp;
 3382 
 3383         XL_LOCK(sc);
 3384 
 3385         xl_reset(sc);
 3386         if (ifp->if_flags & IFF_UP)
 3387                 xl_init_locked(sc);
 3388 
 3389         XL_UNLOCK(sc);
 3390 
 3391         return (0);
 3392 }
