FreeBSD/Linux Kernel Cross Reference
sys/pci/if_xl.c


    1 /*-
    2  * Copyright (c) 1997, 1998, 1999
    3  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Bill Paul.
   16  * 4. Neither the name of the author nor the names of any co-contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   30  * THE POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD: releng/6.3/sys/pci/if_xl.c 173886 2007-11-24 19:45:58Z cvs2svn $");
   35 
   36 /*
   37  * 3Com 3c90x Etherlink XL PCI NIC driver
   38  *
   39  * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
   40  * bus-master chips (3c90x cards and embedded controllers) including
   41  * the following:
   42  *
   43  * 3Com 3c900-TPO       10Mbps/RJ-45
   44  * 3Com 3c900-COMBO     10Mbps/RJ-45,AUI,BNC
   45  * 3Com 3c905-TX        10/100Mbps/RJ-45
   46  * 3Com 3c905-T4        10/100Mbps/RJ-45
   47  * 3Com 3c900B-TPO      10Mbps/RJ-45
   48  * 3Com 3c900B-COMBO    10Mbps/RJ-45,AUI,BNC
   49  * 3Com 3c900B-TPC      10Mbps/RJ-45,BNC
   50  * 3Com 3c900B-FL       10Mbps/Fiber-optic
   51  * 3Com 3c905B-COMBO    10/100Mbps/RJ-45,AUI,BNC
   52  * 3Com 3c905B-TX       10/100Mbps/RJ-45
   53  * 3Com 3c905B-FL/FX    10/100Mbps/Fiber-optic
   54  * 3Com 3c905C-TX       10/100Mbps/RJ-45 (Tornado ASIC)
   55  * 3Com 3c980-TX        10/100Mbps server adapter (Hurricane ASIC)
   56  * 3Com 3c980C-TX       10/100Mbps server adapter (Tornado ASIC)
   57  * 3Com 3cSOHO100-TX    10/100Mbps/RJ-45 (Hurricane ASIC)
   58  * 3Com 3c450-TX        10/100Mbps/RJ-45 (Tornado ASIC)
   59  * 3Com 3c555           10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
   60  * 3Com 3c556           10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
   61  * 3Com 3c556B          10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
   62  * 3Com 3c575TX         10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   63  * 3Com 3c575B          10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   64  * 3Com 3c575C          10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   65  * 3Com 3cxfem656       10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   66  * 3Com 3cxfem656b      10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
   67  * 3Com 3cxfem656c      10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
   68  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
   69  * Dell on-board 3c920 10/100Mbps/RJ-45
   70  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
   71  * Dell Latitude laptop docking station embedded 3c905-TX
   72  *
   73  * Written by Bill Paul <wpaul@ctr.columbia.edu>
   74  * Electrical Engineering Department
   75  * Columbia University, New York City
   76  */
   77 /*
    78  * The 3c90x series chips use a bus-master DMA interface for transferring
   79  * packets to and from the controller chip. Some of the "vortex" cards
   80  * (3c59x) also supported a bus master mode, however for those chips
   81  * you could only DMA packets to/from a contiguous memory buffer. For
   82  * transmission this would mean copying the contents of the queued mbuf
   83  * chain into an mbuf cluster and then DMAing the cluster. This extra
   84  * copy would sort of defeat the purpose of the bus master support for
   85  * any packet that doesn't fit into a single mbuf.
   86  *
   87  * By contrast, the 3c90x cards support a fragment-based bus master
   88  * mode where mbuf chains can be encapsulated using TX descriptors.
   89  * This is similar to other PCI chips such as the Texas Instruments
   90  * ThunderLAN and the Intel 82557/82558.
   91  *
   92  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
   93  * bus master chips because they maintain the old PIO interface for
   94  * backwards compatibility, but starting with the 3c905B and the
   95  * "cyclone" chips, the compatibility interface has been dropped.
   96  * Since using bus master DMA is a big win, we use this driver to
    97  * support the PCI "boomerang" chips, even though they also work with
    98  * the "vortex" driver, in order to obtain better performance.
   99  *
  100  * This driver is in the /sys/pci directory because it only supports
  101  * PCI-based NICs.
  102  */
  103 
  104 #ifdef HAVE_KERNEL_OPTION_HEADERS
  105 #include "opt_device_polling.h"
  106 #endif
  107 
  108 #include <sys/param.h>
  109 #include <sys/systm.h>
  110 #include <sys/sockio.h>
  111 #include <sys/endian.h>
  112 #include <sys/mbuf.h>
  113 #include <sys/kernel.h>
  114 #include <sys/module.h>
  115 #include <sys/socket.h>
  116 #include <sys/taskqueue.h>
  117 
  118 #include <net/if.h>
  119 #include <net/if_arp.h>
  120 #include <net/ethernet.h>
  121 #include <net/if_dl.h>
  122 #include <net/if_media.h>
  123 #include <net/if_types.h>
  124 
  125 #include <net/bpf.h>
  126 
  127 #include <machine/bus.h>
  128 #include <machine/resource.h>
  129 #include <sys/bus.h>
  130 #include <sys/rman.h>
  131 
  132 #include <dev/mii/mii.h>
  133 #include <dev/mii/miivar.h>
  134 
  135 #include <dev/pci/pcireg.h>
  136 #include <dev/pci/pcivar.h>
  137 
  138 MODULE_DEPEND(xl, pci, 1, 1, 1);
  139 MODULE_DEPEND(xl, ether, 1, 1, 1);
  140 MODULE_DEPEND(xl, miibus, 1, 1, 1);
  141 
  142 /* "device miibus" required.  See GENERIC if you get errors here. */
  143 #include "miibus_if.h"
  144 
  145 #include <pci/if_xlreg.h>
  146 
  147 /*
  148  * TX Checksumming is disabled by default for two reasons:
  149  * - TX Checksumming will occasionally produce corrupt packets
  150  * - TX Checksumming seems to reduce performance
  151  *
   152  * Only 905B/C cards were reported to have this problem; it is possible
  153  * that later chips _may_ be immune.
  154  */
  155 #define XL905B_TXCSUM_BROKEN    1
  156 
  157 #ifdef XL905B_TXCSUM_BROKEN
  158 #define XL905B_CSUM_FEATURES    0
  159 #else
  160 #define XL905B_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
  161 #endif
  162 
  163 /*
  164  * Various supported device vendors/types and their names.
  165  */
  166 static struct xl_type xl_devs[] = {
  167         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
  168                 "3Com 3c900-TPO Etherlink XL" },
  169         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
  170                 "3Com 3c900-COMBO Etherlink XL" },
  171         { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
  172                 "3Com 3c905-TX Fast Etherlink XL" },
  173         { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
  174                 "3Com 3c905-T4 Fast Etherlink XL" },
  175         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
  176                 "3Com 3c900B-TPO Etherlink XL" },
  177         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
  178                 "3Com 3c900B-COMBO Etherlink XL" },
  179         { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
  180                 "3Com 3c900B-TPC Etherlink XL" },
  181         { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
  182                 "3Com 3c900B-FL Etherlink XL" },
  183         { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
  184                 "3Com 3c905B-TX Fast Etherlink XL" },
  185         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
  186                 "3Com 3c905B-T4 Fast Etherlink XL" },
  187         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
  188                 "3Com 3c905B-FX/SC Fast Etherlink XL" },
  189         { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
  190                 "3Com 3c905B-COMBO Fast Etherlink XL" },
  191         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
  192                 "3Com 3c905C-TX Fast Etherlink XL" },
  193         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
  194                 "3Com 3c920B-EMB Integrated Fast Etherlink XL" },
  195         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM,
  196                 "3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" },
  197         { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
  198                 "3Com 3c980 Fast Etherlink XL" },
  199         { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
  200                 "3Com 3c980C Fast Etherlink XL" },
  201         { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
  202                 "3Com 3cSOHO100-TX OfficeConnect" },
  203         { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
  204                 "3Com 3c450-TX HomeConnect" },
  205         { TC_VENDORID, TC_DEVICEID_HURRICANE_555,
  206                 "3Com 3c555 Fast Etherlink XL" },
  207         { TC_VENDORID, TC_DEVICEID_HURRICANE_556,
  208                 "3Com 3c556 Fast Etherlink XL" },
  209         { TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
  210                 "3Com 3c556B Fast Etherlink XL" },
  211         { TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
  212                 "3Com 3c575TX Fast Etherlink XL" },
  213         { TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
  214                 "3Com 3c575B Fast Etherlink XL" },
  215         { TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
  216                 "3Com 3c575C Fast Etherlink XL" },
  217         { TC_VENDORID, TC_DEVICEID_HURRICANE_656,
  218                 "3Com 3c656 Fast Etherlink XL" },
  219         { TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
  220                 "3Com 3c656B Fast Etherlink XL" },
  221         { TC_VENDORID, TC_DEVICEID_TORNADO_656C,
  222                 "3Com 3c656C Fast Etherlink XL" },
  223         { 0, 0, NULL }
  224 };
  225 
  226 static int xl_probe(device_t);
  227 static int xl_attach(device_t);
  228 static int xl_detach(device_t);
  229 
  230 static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
  231 static void xl_stats_update(void *);
  232 static void xl_stats_update_locked(struct xl_softc *);
  233 static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf *);
  234 static void xl_rxeof(struct xl_softc *);
  235 static void xl_rxeof_task(void *, int);
  236 static int xl_rx_resync(struct xl_softc *);
  237 static void xl_txeof(struct xl_softc *);
  238 static void xl_txeof_90xB(struct xl_softc *);
  239 static void xl_txeoc(struct xl_softc *);
  240 static void xl_intr(void *);
  241 static void xl_start(struct ifnet *);
  242 static void xl_start_locked(struct ifnet *);
  243 static void xl_start_90xB_locked(struct ifnet *);
  244 static int xl_ioctl(struct ifnet *, u_long, caddr_t);
  245 static void xl_init(void *);
  246 static void xl_init_locked(struct xl_softc *);
  247 static void xl_stop(struct xl_softc *);
  248 static int xl_watchdog(struct xl_softc *);
  249 static void xl_shutdown(device_t);
  250 static int xl_suspend(device_t);
  251 static int xl_resume(device_t);
  252 
  253 #ifdef DEVICE_POLLING
  254 static void xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
  255 static void xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
  256 #endif
  257 
  258 static int xl_ifmedia_upd(struct ifnet *);
  259 static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  260 
  261 static int xl_eeprom_wait(struct xl_softc *);
  262 static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
  263 static void xl_mii_sync(struct xl_softc *);
  264 static void xl_mii_send(struct xl_softc *, u_int32_t, int);
  265 static int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
  266 static int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
  267 
  268 static void xl_setcfg(struct xl_softc *);
  269 static void xl_setmode(struct xl_softc *, int);
  270 static void xl_setmulti(struct xl_softc *);
  271 static void xl_setmulti_hash(struct xl_softc *);
  272 static void xl_reset(struct xl_softc *);
  273 static int xl_list_rx_init(struct xl_softc *);
  274 static int xl_list_tx_init(struct xl_softc *);
  275 static int xl_list_tx_init_90xB(struct xl_softc *);
  276 static void xl_wait(struct xl_softc *);
  277 static void xl_mediacheck(struct xl_softc *);
  278 static void xl_choose_media(struct xl_softc *sc, int *media);
  279 static void xl_choose_xcvr(struct xl_softc *, int);
  280 static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int);
  281 static void xl_dma_map_rxbuf(void *, bus_dma_segment_t *, int, bus_size_t, int);
  282 static void xl_dma_map_txbuf(void *, bus_dma_segment_t *, int, bus_size_t, int);
  283 #ifdef notdef
  284 static void xl_testpacket(struct xl_softc *);
  285 #endif
  286 
  287 static int xl_miibus_readreg(device_t, int, int);
  288 static int xl_miibus_writereg(device_t, int, int, int);
  289 static void xl_miibus_statchg(device_t);
  290 static void xl_miibus_mediainit(device_t);
  291 
  292 static device_method_t xl_methods[] = {
  293         /* Device interface */
  294         DEVMETHOD(device_probe,         xl_probe),
  295         DEVMETHOD(device_attach,        xl_attach),
  296         DEVMETHOD(device_detach,        xl_detach),
  297         DEVMETHOD(device_shutdown,      xl_shutdown),
  298         DEVMETHOD(device_suspend,       xl_suspend),
  299         DEVMETHOD(device_resume,        xl_resume),
  300 
  301         /* bus interface */
  302         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  303         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  304 
  305         /* MII interface */
  306         DEVMETHOD(miibus_readreg,       xl_miibus_readreg),
  307         DEVMETHOD(miibus_writereg,      xl_miibus_writereg),
  308         DEVMETHOD(miibus_statchg,       xl_miibus_statchg),
  309         DEVMETHOD(miibus_mediainit,     xl_miibus_mediainit),
  310 
  311         { 0, 0 }
  312 };
  313 
  314 static driver_t xl_driver = {
  315         "xl",
  316         xl_methods,
  317         sizeof(struct xl_softc)
  318 };
  319 
  320 static devclass_t xl_devclass;
  321 
  322 DRIVER_MODULE(xl, cardbus, xl_driver, xl_devclass, 0, 0);
  323 DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0);
  324 DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
  325 
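       /*
        * busdma callback used when loading the descriptor lists; it simply
        * records the bus address of the (single) DMA segment in the
        * caller-supplied variable.
        */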
  326 static void
  327 xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  328 {
  329         u_int32_t *paddr;
  330 
  331         paddr = arg;
  332         *paddr = segs->ds_addr;
  333 }
  334 
  335 static void
  336 xl_dma_map_rxbuf(void *arg, bus_dma_segment_t *segs, int nseg,
  337     bus_size_t mapsize, int error)
  338 {
  339         u_int32_t *paddr;
  340 
  341         if (error)
  342                 return;
  343 
  344         KASSERT(nseg == 1, ("xl_dma_map_rxbuf: too many DMA segments"));
  345         paddr = arg;
  346         *paddr = segs->ds_addr;
  347 }
  348 
  349 static void
  350 xl_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
  351     bus_size_t mapsize, int error)
  352 {
  353         struct xl_list *l;
  354         int i, total_len;
  355 
  356         if (error)
  357                 return;
  358 
  359         KASSERT(nseg <= XL_MAXFRAGS, ("too many DMA segments"));
  360 
  361         total_len = 0;
  362         l = arg;
  363         for (i = 0; i < nseg; i++) {
  364                 KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
  365                 l->xl_frag[i].xl_addr = htole32(segs[i].ds_addr);
  366                 l->xl_frag[i].xl_len = htole32(segs[i].ds_len);
  367                 total_len += segs[i].ds_len;
  368         }
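               /*
                * Flag the final fragment so the chip knows where the frame
                * ends, store the total frame length in the descriptor status
                * word, and clear the next-descriptor pointer.
                */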
  369         l->xl_frag[nseg - 1].xl_len = htole32(segs[nseg - 1].ds_len |
  370             XL_LAST_FRAG);
  371         l->xl_status = htole32(total_len);
  372         l->xl_next = 0;
  373 }
  374 
  375 /*
  376  * Murphy's law says that it's possible the chip can wedge and
  377  * the 'command in progress' bit may never clear. Hence, we wait
  378  * only a finite amount of time to avoid getting caught in an
  379  * infinite loop. Normally this delay routine would be a macro,
  380  * but it isn't called during normal operation so we can afford
  381  * to make it a function.
  382  */
  383 static void
  384 xl_wait(struct xl_softc *sc)
  385 {
  386         register int            i;
  387 
  388         for (i = 0; i < XL_TIMEOUT; i++) {
  389                 if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0)
  390                         break;
  391         }
  392 
  393         if (i == XL_TIMEOUT)
  394                 device_printf(sc->xl_dev, "command never completed!\n");
  395 }
  396 
  397 /*
  398  * MII access routines are provided for adapters with external
  399  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
  400  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
  401  * Note: if you don't perform the MDIO operations just right,
  402  * it's possible to end up with code that works correctly with
  403  * some chips/CPUs/processor speeds/bus speeds/etc but not
  404  * with others.
  405  */
  406 #define MII_SET(x)                                      \
  407         CSR_WRITE_2(sc, XL_W4_PHY_MGMT,                 \
  408                 CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
  409 
  410 #define MII_CLR(x)                                      \
  411         CSR_WRITE_2(sc, XL_W4_PHY_MGMT,                 \
  412                 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
  413 
  414 /*
  415  * Sync the PHYs by setting data bit and strobing the clock 32 times.
  416  */
  417 static void
  418 xl_mii_sync(struct xl_softc *sc)
  419 {
  420         register int            i;
  421 
  422         XL_SEL_WIN(4);
  423         MII_SET(XL_MII_DIR|XL_MII_DATA);
  424 
  425         for (i = 0; i < 32; i++) {
  426                 MII_SET(XL_MII_CLK);
  427                 MII_SET(XL_MII_DATA);
  428                 MII_SET(XL_MII_DATA);
  429                 MII_CLR(XL_MII_CLK);
  430                 MII_SET(XL_MII_DATA);
  431                 MII_SET(XL_MII_DATA);
  432         }
  433 }
  434 
  435 /*
  436  * Clock a series of bits through the MII.
  437  */
  438 static void
  439 xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
  440 {
  441         int                     i;
  442 
  443         XL_SEL_WIN(4);
  444         MII_CLR(XL_MII_CLK);
  445 
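               /*
                * Shift the bits out MSB first: present each data bit while
                * the clock is low, then raise the clock to latch it.
                */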
  446         for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
  447                 if (bits & i) {
  448                         MII_SET(XL_MII_DATA);
  449                 } else {
  450                         MII_CLR(XL_MII_DATA);
  451                 }
  452                 MII_CLR(XL_MII_CLK);
  453                 MII_SET(XL_MII_CLK);
  454         }
  455 }
  456 
  457 /*
   458  * Read a PHY register through the MII.
  459  */
  460 static int
  461 xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
  462 {
  463         int                     i, ack;
  464 
  465         /* Set up frame for RX. */
  466         frame->mii_stdelim = XL_MII_STARTDELIM;
  467         frame->mii_opcode = XL_MII_READOP;
  468         frame->mii_turnaround = 0;
  469         frame->mii_data = 0;
  470 
  471         /* Select register window 4. */
  472         XL_SEL_WIN(4);
  473 
  474         CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
  475         /* Turn on data xmit. */
  476         MII_SET(XL_MII_DIR);
  477 
  478         xl_mii_sync(sc);
  479 
  480         /* Send command/address info. */
  481         xl_mii_send(sc, frame->mii_stdelim, 2);
  482         xl_mii_send(sc, frame->mii_opcode, 2);
  483         xl_mii_send(sc, frame->mii_phyaddr, 5);
  484         xl_mii_send(sc, frame->mii_regaddr, 5);
  485 
  486         /* Idle bit */
  487         MII_CLR((XL_MII_CLK|XL_MII_DATA));
  488         MII_SET(XL_MII_CLK);
  489 
  490         /* Turn off xmit. */
  491         MII_CLR(XL_MII_DIR);
  492 
  493         /* Check for ack */
  494         MII_CLR(XL_MII_CLK);
  495         ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
  496         MII_SET(XL_MII_CLK);
  497 
  498         /*
  499          * Now try reading data bits. If the ack failed, we still
  500          * need to clock through 16 cycles to keep the PHY(s) in sync.
  501          */
  502         if (ack) {
  503                 for (i = 0; i < 16; i++) {
  504                         MII_CLR(XL_MII_CLK);
  505                         MII_SET(XL_MII_CLK);
  506                 }
  507                 goto fail;
  508         }
  509 
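               /*
                * Shift in the 16 data bits, MSB first, sampling each bit
                * while the clock is low.
                */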
  510         for (i = 0x8000; i; i >>= 1) {
  511                 MII_CLR(XL_MII_CLK);
  512                 if (!ack) {
  513                         if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
  514                                 frame->mii_data |= i;
  515                 }
  516                 MII_SET(XL_MII_CLK);
  517         }
  518 
  519 fail:
  520         MII_CLR(XL_MII_CLK);
  521         MII_SET(XL_MII_CLK);
  522 
  523         return (ack ? 1 : 0);
  524 }
  525 
  526 /*
  527  * Write to a PHY register through the MII.
  528  */
  529 static int
  530 xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
  531 {
  532 
  533         /* Set up frame for TX. */
  534         frame->mii_stdelim = XL_MII_STARTDELIM;
  535         frame->mii_opcode = XL_MII_WRITEOP;
  536         frame->mii_turnaround = XL_MII_TURNAROUND;
  537 
   538         /* Select register window 4. */
  539         XL_SEL_WIN(4);
  540 
  541         /* Turn on data output. */
  542         MII_SET(XL_MII_DIR);
  543 
  544         xl_mii_sync(sc);
  545 
  546         xl_mii_send(sc, frame->mii_stdelim, 2);
  547         xl_mii_send(sc, frame->mii_opcode, 2);
  548         xl_mii_send(sc, frame->mii_phyaddr, 5);
  549         xl_mii_send(sc, frame->mii_regaddr, 5);
  550         xl_mii_send(sc, frame->mii_turnaround, 2);
  551         xl_mii_send(sc, frame->mii_data, 16);
  552 
  553         /* Idle bit. */
  554         MII_SET(XL_MII_CLK);
  555         MII_CLR(XL_MII_CLK);
  556 
  557         /* Turn off xmit. */
  558         MII_CLR(XL_MII_DIR);
  559 
  560         return (0);
  561 }
  562 
  563 static int
  564 xl_miibus_readreg(device_t dev, int phy, int reg)
  565 {
  566         struct xl_softc         *sc;
  567         struct xl_mii_frame     frame;
  568 
  569         sc = device_get_softc(dev);
  570 
  571         /*
  572          * Pretend that PHYs are only available at MII address 24.
  573          * This is to guard against problems with certain 3Com ASIC
  574          * revisions that incorrectly map the internal transceiver
  575          * control registers at all MII addresses. This can cause
  576          * the miibus code to attach the same PHY several times over.
  577          */
  578         if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24)
  579                 return (0);
  580 
  581         bzero((char *)&frame, sizeof(frame));
  582         frame.mii_phyaddr = phy;
  583         frame.mii_regaddr = reg;
  584 
  585         xl_mii_readreg(sc, &frame);
  586 
  587         return (frame.mii_data);
  588 }
  589 
  590 static int
  591 xl_miibus_writereg(device_t dev, int phy, int reg, int data)
  592 {
  593         struct xl_softc         *sc;
  594         struct xl_mii_frame     frame;
  595 
  596         sc = device_get_softc(dev);
  597 
  598         if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24)
  599                 return (0);
  600 
  601         bzero((char *)&frame, sizeof(frame));
  602         frame.mii_phyaddr = phy;
  603         frame.mii_regaddr = reg;
  604         frame.mii_data = data;
  605 
  606         xl_mii_writereg(sc, &frame);
  607 
  608         return (0);
  609 }
  610 
  611 static void
  612 xl_miibus_statchg(device_t dev)
  613 {
  614         struct xl_softc         *sc;
  615         struct mii_data         *mii;
  616 
  617         sc = device_get_softc(dev);
  618         mii = device_get_softc(sc->xl_miibus);
  619 
  620         xl_setcfg(sc);
  621 
  622         /* Set ASIC's duplex mode to match the PHY. */
  623         XL_SEL_WIN(3);
  624         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
  625                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
  626         else
  627                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
  628                     (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
  629 }
  630 
  631 /*
  632  * Special support for the 3c905B-COMBO. This card has 10/100 support
  633  * plus BNC and AUI ports. This means we will have both an miibus attached
  634  * plus some non-MII media settings. In order to allow this, we have to
  635  * add the extra media to the miibus's ifmedia struct, but we can't do
  636  * that during xl_attach() because the miibus hasn't been attached yet.
  637  * So instead, we wait until the miibus probe/attach is done, at which
   638  * point we will get a callback telling us that it's safe to add our
  639  * extra media.
  640  */
  641 static void
  642 xl_miibus_mediainit(device_t dev)
  643 {
  644         struct xl_softc         *sc;
  645         struct mii_data         *mii;
  646         struct ifmedia          *ifm;
  647 
  648         sc = device_get_softc(dev);
  649         mii = device_get_softc(sc->xl_miibus);
  650         ifm = &mii->mii_media;
  651 
  652         if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) {
  653                 /*
  654                  * Check for a 10baseFL board in disguise.
  655                  */
  656                 if (sc->xl_type == XL_TYPE_905B &&
  657                     sc->xl_media == XL_MEDIAOPT_10FL) {
  658                         if (bootverbose)
  659                                 device_printf(sc->xl_dev, "found 10baseFL\n");
  660                         ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL);
  661                         ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0,
  662                             NULL);
  663                         if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
  664                                 ifmedia_add(ifm,
  665                                     IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL);
  666                 } else {
  667                         if (bootverbose)
  668                                 device_printf(sc->xl_dev, "found AUI\n");
  669                         ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL);
  670                 }
  671         }
  672 
  673         if (sc->xl_media & XL_MEDIAOPT_BNC) {
  674                 if (bootverbose)
  675                         device_printf(sc->xl_dev, "found BNC\n");
  676                 ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL);
  677         }
  678 }
  679 
  680 /*
  681  * The EEPROM is slow: give it time to come ready after issuing
  682  * it a command.
  683  */
  684 static int
  685 xl_eeprom_wait(struct xl_softc *sc)
  686 {
  687         int                     i;
  688 
  689         for (i = 0; i < 100; i++) {
  690                 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
  691                         DELAY(162);
  692                 else
  693                         break;
  694         }
  695 
  696         if (i == 100) {
  697                 device_printf(sc->xl_dev, "eeprom failed to come ready\n");
  698                 return (1);
  699         }
  700 
  701         return (0);
  702 }
  703 
  704 /*
  705  * Read a sequence of words from the EEPROM. Note that ethernet address
  706  * data is stored in the EEPROM in network byte order.
  707  */
  708 static int
  709 xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
  710 {
  711         int                     err = 0, i;
  712         u_int16_t               word = 0, *ptr;
  713 
  714 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
  715 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
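       /*
        * EEPROM_5BIT_OFFSET() moves offset bits 6 and above into the high
        * byte of the command word (bits 8-14) while leaving the low six
        * bits in place; EEPROM_8BIT_OFFSET() simply truncates the offset
        * to six bits for cards flagged XL_FLAG_8BITROM.
        */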
  716         /*
  717          * XXX: WARNING! DANGER!
  718          * It's easy to accidentally overwrite the rom content!
  719          * Note: the 3c575 uses 8bit EEPROM offsets.
  720          */
  721         XL_SEL_WIN(0);
  722 
  723         if (xl_eeprom_wait(sc))
  724                 return (1);
  725 
  726         if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
  727                 off += 0x30;
  728 
  729         for (i = 0; i < cnt; i++) {
  730                 if (sc->xl_flags & XL_FLAG_8BITROM)
  731                         CSR_WRITE_2(sc, XL_W0_EE_CMD,
  732                             XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
  733                 else
  734                         CSR_WRITE_2(sc, XL_W0_EE_CMD,
  735                             XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
  736                 err = xl_eeprom_wait(sc);
  737                 if (err)
  738                         break;
  739                 word = CSR_READ_2(sc, XL_W0_EE_DATA);
  740                 ptr = (u_int16_t *)(dest + (i * 2));
  741                 if (swap)
  742                         *ptr = ntohs(word);
  743                 else
  744                         *ptr = word;
  745         }
  746 
  747         return (err ? 1 : 0);
  748 }
  749 
  750 /*
  751  * NICs older than the 3c905B have only one multicast option, which
  752  * is to enable reception of all multicast frames.
  753  */
  754 static void
  755 xl_setmulti(struct xl_softc *sc)
  756 {
  757         struct ifnet            *ifp = sc->xl_ifp;
  758         struct ifmultiaddr      *ifma;
  759         u_int8_t                rxfilt;
  760         int                     mcnt = 0;
  761 
  762         XL_LOCK_ASSERT(sc);
  763 
  764         XL_SEL_WIN(5);
  765         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
  766 
  767         if (ifp->if_flags & IFF_ALLMULTI) {
  768                 rxfilt |= XL_RXFILTER_ALLMULTI;
  769                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
  770                 return;
  771         }
  772 
  773         IF_ADDR_LOCK(ifp);
  774         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
  775                 mcnt++;
  776         IF_ADDR_UNLOCK(ifp);
  777 
  778         if (mcnt)
  779                 rxfilt |= XL_RXFILTER_ALLMULTI;
  780         else
  781                 rxfilt &= ~XL_RXFILTER_ALLMULTI;
  782 
  783         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
  784 }
  785 
  786 /*
  787  * 3c905B adapters have a hash filter that we can program.
  788  */
  789 static void
  790 xl_setmulti_hash(struct xl_softc *sc)
  791 {
  792         struct ifnet            *ifp = sc->xl_ifp;
  793         int                     h = 0, i;
  794         struct ifmultiaddr      *ifma;
  795         u_int8_t                rxfilt;
  796         int                     mcnt = 0;
  797 
  798         XL_LOCK_ASSERT(sc);
  799 
  800         XL_SEL_WIN(5);
  801         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
  802 
  803         if (ifp->if_flags & IFF_ALLMULTI) {
  804                 rxfilt |= XL_RXFILTER_ALLMULTI;
  805                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
  806                 return;
  807         } else
  808                 rxfilt &= ~XL_RXFILTER_ALLMULTI;
  809 
  810         /* first, zot all the existing hash bits */
  811         for (i = 0; i < XL_HASHFILT_SIZE; i++)
  812                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
  813 
  814         /* now program new ones */
  815         IF_ADDR_LOCK(ifp);
  816         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  817                 if (ifma->ifma_addr->sa_family != AF_LINK)
  818                         continue;
  819                 /*
  820                  * Note: the 3c905B currently only supports a 64-bit hash
  821                  * table, which means we really only need 6 bits, but the
  822                  * manual indicates that future chip revisions will have a
  823                  * 256-bit hash table, hence the routine is set up to
  824                  * calculate 8 bits of position info in case we need it some
  825                  * day.
  826                  * Note II, The Sequel: _CURRENT_ versions of the 3c905B have
  827                  * a 256 bit hash table. This means we have to use all 8 bits
  828                  * regardless. On older cards, the upper 2 bits will be
  829                  * ignored. Grrrr....
  830                  */
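                       /*
                        * The hash bucket is the low 8 bits of the big-endian
                        * CRC32 of the station address.
                        */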
  831                 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  832                     ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
  833                 CSR_WRITE_2(sc, XL_COMMAND,
  834                     h | XL_CMD_RX_SET_HASH | XL_HASH_SET);
  835                 mcnt++;
  836         }
  837         IF_ADDR_UNLOCK(ifp);
  838 
  839         if (mcnt)
  840                 rxfilt |= XL_RXFILTER_MULTIHASH;
  841         else
  842                 rxfilt &= ~XL_RXFILTER_MULTIHASH;
  843 
  844         CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT);
  845 }
  846 
  847 #ifdef notdef
  848 static void
  849 xl_testpacket(struct xl_softc *sc)
  850 {
  851         struct mbuf             *m;
  852         struct ifnet            *ifp = sc->xl_ifp;
  853 
  854         MGETHDR(m, M_DONTWAIT, MT_DATA);
  855 
  856         if (m == NULL)
  857                 return;
  858 
  859         bcopy(&IFP2ENADDR(sc->xl_ifp),
  860                 mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
  861         bcopy(&IFP2ENADDR(sc->xl_ifp),
  862                 mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
  863         mtod(m, struct ether_header *)->ether_type = htons(3);
  864         mtod(m, unsigned char *)[14] = 0;
  865         mtod(m, unsigned char *)[15] = 0;
  866         mtod(m, unsigned char *)[16] = 0xE3;
  867         m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
  868         IFQ_ENQUEUE(&ifp->if_snd, m);
  869         xl_start(ifp);
  870 }
  871 #endif
  872 
  873 static void
  874 xl_setcfg(struct xl_softc *sc)
  875 {
  876         u_int32_t               icfg;
  877 
  878         /*XL_LOCK_ASSERT(sc);*/
  879 
  880         XL_SEL_WIN(3);
  881         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
  882         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  883         if (sc->xl_media & XL_MEDIAOPT_MII ||
  884                 sc->xl_media & XL_MEDIAOPT_BT4)
  885                 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
  886         if (sc->xl_media & XL_MEDIAOPT_BTX)
  887                 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
  888 
  889         CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
  890         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
  891 }
  892 
  893 static void
  894 xl_setmode(struct xl_softc *sc, int media)
  895 {
  896         u_int32_t               icfg;
  897         u_int16_t               mediastat;
  898         char                    *pmsg = "", *dmsg = "";
  899 
  900         XL_LOCK_ASSERT(sc);
  901 
  902         XL_SEL_WIN(4);
  903         mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
  904         XL_SEL_WIN(3);
  905         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
  906 
  907         if (sc->xl_media & XL_MEDIAOPT_BT) {
  908                 if (IFM_SUBTYPE(media) == IFM_10_T) {
  909                         pmsg = "10baseT transceiver";
  910                         sc->xl_xcvr = XL_XCVR_10BT;
  911                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  912                         icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
  913                         mediastat |= XL_MEDIASTAT_LINKBEAT |
  914                             XL_MEDIASTAT_JABGUARD;
  915                         mediastat &= ~XL_MEDIASTAT_SQEENB;
  916                 }
  917         }
  918 
  919         if (sc->xl_media & XL_MEDIAOPT_BFX) {
  920                 if (IFM_SUBTYPE(media) == IFM_100_FX) {
  921                         pmsg = "100baseFX port";
  922                         sc->xl_xcvr = XL_XCVR_100BFX;
  923                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  924                         icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
  925                         mediastat |= XL_MEDIASTAT_LINKBEAT;
  926                         mediastat &= ~XL_MEDIASTAT_SQEENB;
  927                 }
  928         }
  929 
  930         if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
  931                 if (IFM_SUBTYPE(media) == IFM_10_5) {
  932                         pmsg = "AUI port";
  933                         sc->xl_xcvr = XL_XCVR_AUI;
  934                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  935                         icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
  936                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  937                             XL_MEDIASTAT_JABGUARD);
  938                         mediastat |= ~XL_MEDIASTAT_SQEENB;
  939                 }
  940                 if (IFM_SUBTYPE(media) == IFM_10_FL) {
  941                         pmsg = "10baseFL transceiver";
  942                         sc->xl_xcvr = XL_XCVR_AUI;
  943                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  944                         icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
  945                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  946                             XL_MEDIASTAT_JABGUARD);
  947                         mediastat |= ~XL_MEDIASTAT_SQEENB;
  948                 }
  949         }
  950 
  951         if (sc->xl_media & XL_MEDIAOPT_BNC) {
  952                 if (IFM_SUBTYPE(media) == IFM_10_2) {
  953                         pmsg = "AUI port";
  954                         sc->xl_xcvr = XL_XCVR_COAX;
  955                         icfg &= ~XL_ICFG_CONNECTOR_MASK;
  956                         icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
  957                         mediastat &= ~(XL_MEDIASTAT_LINKBEAT |
  958                             XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB);
  959                 }
  960         }
  961 
  962         if ((media & IFM_GMASK) == IFM_FDX ||
  963                         IFM_SUBTYPE(media) == IFM_100_FX) {
  964                 dmsg = "full";
  965                 XL_SEL_WIN(3);
  966                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
  967         } else {
  968                 dmsg = "half";
  969                 XL_SEL_WIN(3);
  970                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
  971                         (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
  972         }
  973 
  974         if (IFM_SUBTYPE(media) == IFM_10_2)
  975                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
  976         else
  977                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
  978 
  979         CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
  980         XL_SEL_WIN(4);
  981         CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
  982 
  983         DELAY(800);
  984         XL_SEL_WIN(7);
  985 
  986         device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg);
  987 }
  988 
  989 static void
  990 xl_reset(struct xl_softc *sc)
  991 {
  992         register int            i;
  993 
  994         XL_LOCK_ASSERT(sc);
  995 
  996         XL_SEL_WIN(0);
  997         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
  998             ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
  999              XL_RESETOPT_DISADVFD:0));
 1000 
 1001         /*
 1002          * If we're using memory mapped register mode, pause briefly
 1003          * after issuing the reset command before trying to access any
 1004          * other registers. With my 3c575C cardbus card, failing to do
 1005          * this results in the system locking up while trying to poll
 1006          * the command busy bit in the status register.
 1007          */
 1008         if (sc->xl_flags & XL_FLAG_USE_MMIO)
 1009                 DELAY(100000);
 1010 
 1011         for (i = 0; i < XL_TIMEOUT; i++) {
 1012                 DELAY(10);
 1013                 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
 1014                         break;
 1015         }
 1016 
 1017         if (i == XL_TIMEOUT)
 1018                 device_printf(sc->xl_dev, "reset didn't complete\n");
 1019 
 1020         /* Reset TX and RX. */
 1021         /* Note: the RX reset takes an absurd amount of time
 1022          * on newer versions of the Tornado chips such as those
 1023          * on the 3c905CX and newer 3c908C cards. We wait an
 1024          * extra amount of time so that xl_wait() doesn't complain
 1025          * and annoy the users.
 1026          */
 1027         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 1028         DELAY(100000);
 1029         xl_wait(sc);
 1030         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 1031         xl_wait(sc);
 1032 
 1033         if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
 1034             sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
 1035                 XL_SEL_WIN(2);
 1036                 CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS,
 1037                     CSR_READ_2(sc, XL_W2_RESET_OPTIONS) |
 1038                     ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ?
 1039                     XL_RESETOPT_INVERT_LED : 0) |
 1040                     ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ?
 1041                     XL_RESETOPT_INVERT_MII : 0));
 1042         }
 1043 
 1044         /* Wait a little while for the chip to get its brains in order. */
 1045         DELAY(100000);
 1046 }
 1047 
 1048 /*
 1049  * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
 1050  * IDs against our list and return a device name if we find a match.
 1051  */
 1052 static int
 1053 xl_probe(device_t dev)
 1054 {
 1055         struct xl_type          *t;
 1056 
 1057         t = xl_devs;
 1058 
 1059         while (t->xl_name != NULL) {
 1060                 if ((pci_get_vendor(dev) == t->xl_vid) &&
 1061                     (pci_get_device(dev) == t->xl_did)) {
 1062                         device_set_desc(dev, t->xl_name);
 1063                         return (BUS_PROBE_DEFAULT);
 1064                 }
 1065                 t++;
 1066         }
 1067 
 1068         return (ENXIO);
 1069 }
 1070 
 1071 /*
 1072  * This routine is a kludge to work around possible hardware faults
 1073  * or manufacturing defects that can cause the media options register
 1074  * (or reset options register, as it's called for the first generation
 1075  * 3c90x adapters) to return an incorrect result. I have encountered
 1076  * one Dell Latitude laptop docking station with an integrated 3c905-TX
 1077  * which doesn't have any of the 'mediaopt' bits set. This screws up
 1078  * the attach routine pretty badly because it doesn't know what media
 1079  * to look for. If we find ourselves in this predicament, this routine
 1080  * will try to guess the media options values and warn the user of a
 1081  * possible manufacturing defect with his adapter/system/whatever.
 1082  */
 1083 static void
 1084 xl_mediacheck(struct xl_softc *sc)
 1085 {
 1086 
 1087         /*
 1088          * If some of the media options bits are set, assume they are
 1089          * correct. If not, try to figure it out down below.
 1090          * XXX I should check for 10baseFL, but I don't have an adapter
 1091          * to test with.
 1092          */
 1093         if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
 1094                 /*
 1095                  * Check the XCVR value. If it's not in the normal range
 1096                  * of values, we need to fake it up here.
 1097                  */
 1098                 if (sc->xl_xcvr <= XL_XCVR_AUTO)
 1099                         return;
 1100                 else {
 1101                         device_printf(sc->xl_dev,
 1102                             "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr);
 1103                         device_printf(sc->xl_dev,
 1104                             "choosing new default based on card type\n");
 1105                 }
 1106         } else {
 1107                 if (sc->xl_type == XL_TYPE_905B &&
 1108                     sc->xl_media & XL_MEDIAOPT_10FL)
 1109                         return;
 1110                 device_printf(sc->xl_dev,
 1111 "WARNING: no media options bits set in the media options register!!\n");
 1112                 device_printf(sc->xl_dev,
 1113 "this could be a manufacturing defect in your adapter or system\n");
 1114                 device_printf(sc->xl_dev,
 1115 "attempting to guess media type; you should probably consult your vendor\n");
 1116         }
 1117 
 1118         xl_choose_xcvr(sc, 1);
 1119 }
 1120 
 1121 static void
 1122 xl_choose_xcvr(struct xl_softc *sc, int verbose)
 1123 {
 1124         u_int16_t               devid;
 1125 
 1126         /*
 1127          * Read the device ID from the EEPROM.
 1128          * This is what's loaded into the PCI device ID register, so it has
  1129          * to be correct; otherwise we wouldn't have gotten this far.
 1130          */
 1131         xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
 1132 
 1133         switch (devid) {
 1134         case TC_DEVICEID_BOOMERANG_10BT:        /* 3c900-TPO */
 1135         case TC_DEVICEID_KRAKATOA_10BT:         /* 3c900B-TPO */
 1136                 sc->xl_media = XL_MEDIAOPT_BT;
 1137                 sc->xl_xcvr = XL_XCVR_10BT;
 1138                 if (verbose)
 1139                         device_printf(sc->xl_dev,
 1140                             "guessing 10BaseT transceiver\n");
 1141                 break;
 1142         case TC_DEVICEID_BOOMERANG_10BT_COMBO:  /* 3c900-COMBO */
 1143         case TC_DEVICEID_KRAKATOA_10BT_COMBO:   /* 3c900B-COMBO */
 1144                 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
 1145                 sc->xl_xcvr = XL_XCVR_10BT;
 1146                 if (verbose)
 1147                         device_printf(sc->xl_dev,
 1148                             "guessing COMBO (AUI/BNC/TP)\n");
 1149                 break;
 1150         case TC_DEVICEID_KRAKATOA_10BT_TPC:     /* 3c900B-TPC */
 1151                 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
 1152                 sc->xl_xcvr = XL_XCVR_10BT;
 1153                 if (verbose)
 1154                         device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n");
 1155                 break;
 1156         case TC_DEVICEID_CYCLONE_10FL:          /* 3c900B-FL */
 1157                 sc->xl_media = XL_MEDIAOPT_10FL;
 1158                 sc->xl_xcvr = XL_XCVR_AUI;
 1159                 if (verbose)
 1160                         device_printf(sc->xl_dev, "guessing 10baseFL\n");
 1161                 break;
 1162         case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
 1163         case TC_DEVICEID_HURRICANE_555:         /* 3c555 */
 1164         case TC_DEVICEID_HURRICANE_556:         /* 3c556 */
 1165         case TC_DEVICEID_HURRICANE_556B:        /* 3c556B */
 1166         case TC_DEVICEID_HURRICANE_575A:        /* 3c575TX */
 1167         case TC_DEVICEID_HURRICANE_575B:        /* 3c575B */
 1168         case TC_DEVICEID_HURRICANE_575C:        /* 3c575C */
 1169         case TC_DEVICEID_HURRICANE_656:         /* 3c656 */
 1170         case TC_DEVICEID_HURRICANE_656B:        /* 3c656B */
 1171         case TC_DEVICEID_TORNADO_656C:          /* 3c656C */
 1172         case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
 1173         case TC_DEVICEID_TORNADO_10_100BT_920B_WNM:     /* 3c920B-EMB-WNM */
 1174                 sc->xl_media = XL_MEDIAOPT_MII;
 1175                 sc->xl_xcvr = XL_XCVR_MII;
 1176                 if (verbose)
 1177                         device_printf(sc->xl_dev, "guessing MII\n");
 1178                 break;
 1179         case TC_DEVICEID_BOOMERANG_100BT4:      /* 3c905-T4 */
 1180         case TC_DEVICEID_CYCLONE_10_100BT4:     /* 3c905B-T4 */
 1181                 sc->xl_media = XL_MEDIAOPT_BT4;
 1182                 sc->xl_xcvr = XL_XCVR_MII;
 1183                 if (verbose)
 1184                         device_printf(sc->xl_dev, "guessing 100baseT4/MII\n");
 1185                 break;
 1186         case TC_DEVICEID_HURRICANE_10_100BT:    /* 3c905B-TX */
 1187         case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */
 1188         case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */
 1189         case TC_DEVICEID_HURRICANE_SOHO100TX:   /* 3cSOHO100-TX */
 1190         case TC_DEVICEID_TORNADO_10_100BT:      /* 3c905C-TX */
 1191         case TC_DEVICEID_TORNADO_HOMECONNECT:   /* 3c450-TX */
 1192                 sc->xl_media = XL_MEDIAOPT_BTX;
 1193                 sc->xl_xcvr = XL_XCVR_AUTO;
 1194                 if (verbose)
 1195                         device_printf(sc->xl_dev, "guessing 10/100 internal\n");
 1196                 break;
 1197         case TC_DEVICEID_CYCLONE_10_100_COMBO:  /* 3c905B-COMBO */
 1198                 sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
 1199                 sc->xl_xcvr = XL_XCVR_AUTO;
 1200                 if (verbose)
 1201                         device_printf(sc->xl_dev,
 1202                             "guessing 10/100 plus BNC/AUI\n");
 1203                 break;
 1204         default:
 1205                 device_printf(sc->xl_dev,
 1206                     "unknown device ID: %x -- defaulting to 10baseT\n", devid);
 1207                 sc->xl_media = XL_MEDIAOPT_BT;
 1208                 break;
 1209         }
 1210 }
 1211 
 1212 /*
 1213  * Attach the interface. Allocate softc structures, do ifmedia
 1214  * setup and ethernet/BPF attach.
 1215  */
 1216 static int
 1217 xl_attach(device_t dev)
 1218 {
 1219         u_char                  eaddr[ETHER_ADDR_LEN];
 1220         u_int16_t               xcvr[2];
 1221         struct xl_softc         *sc;
 1222         struct ifnet            *ifp;
 1223         int                     media;
 1224         int                     unit, error = 0, rid, res;
 1225         uint16_t                did;
 1226 
 1227         sc = device_get_softc(dev);
 1228         sc->xl_dev = dev;
 1229         
 1230         unit = device_get_unit(dev);
 1231 
 1232         mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1233             MTX_DEF);
 1234         ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
 1235 
 1236         did = pci_get_device(dev);
 1237 
 1238         sc->xl_flags = 0;
 1239         if (did == TC_DEVICEID_HURRICANE_555)
 1240                 sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
 1241         if (did == TC_DEVICEID_HURRICANE_556 ||
 1242             did == TC_DEVICEID_HURRICANE_556B)
 1243                 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
 1244                     XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
 1245                     XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
 1246         if (did == TC_DEVICEID_HURRICANE_555 ||
 1247             did == TC_DEVICEID_HURRICANE_556)
 1248                 sc->xl_flags |= XL_FLAG_8BITROM;
 1249         if (did == TC_DEVICEID_HURRICANE_556B)
 1250                 sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
 1251 
 1252         if (did == TC_DEVICEID_HURRICANE_575B ||
 1253             did == TC_DEVICEID_HURRICANE_575C ||
 1254             did == TC_DEVICEID_HURRICANE_656B ||
 1255             did == TC_DEVICEID_TORNADO_656C)
 1256                 sc->xl_flags |= XL_FLAG_FUNCREG;
 1257         if (did == TC_DEVICEID_HURRICANE_575A ||
 1258             did == TC_DEVICEID_HURRICANE_575B ||
 1259             did == TC_DEVICEID_HURRICANE_575C ||
 1260             did == TC_DEVICEID_HURRICANE_656B ||
 1261             did == TC_DEVICEID_TORNADO_656C)
 1262                 sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 |
 1263                   XL_FLAG_8BITROM;
 1264         if (did == TC_DEVICEID_HURRICANE_656)
 1265                 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
 1266         if (did == TC_DEVICEID_HURRICANE_575B)
 1267                 sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
 1268         if (did == TC_DEVICEID_HURRICANE_575C)
 1269                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
 1270         if (did == TC_DEVICEID_TORNADO_656C)
 1271                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
 1272         if (did == TC_DEVICEID_HURRICANE_656 ||
 1273             did == TC_DEVICEID_HURRICANE_656B)
 1274                 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
 1275                     XL_FLAG_INVERT_LED_PWR;
 1276         if (did == TC_DEVICEID_TORNADO_10_100BT_920B ||
 1277             did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM)
 1278                 sc->xl_flags |= XL_FLAG_PHYOK;
 1279 
 1280         switch (did) {
 1281         case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
 1282         case TC_DEVICEID_HURRICANE_575A:
 1283         case TC_DEVICEID_HURRICANE_575B:
 1284         case TC_DEVICEID_HURRICANE_575C:
 1285                 sc->xl_flags |= XL_FLAG_NO_MMIO;
 1286                 break;
 1287         default:
 1288                 break;
 1289         }
 1290 
 1291         /*
 1292          * Map control/status registers.
 1293          */
 1294         pci_enable_busmaster(dev);
 1295 
 1296         if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
 1297                 rid = XL_PCI_LOMEM;
 1298                 res = SYS_RES_MEMORY;
 1299 
 1300                 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
 1301         }
 1302 
 1303         if (sc->xl_res != NULL) {
 1304                 sc->xl_flags |= XL_FLAG_USE_MMIO;
 1305                 if (bootverbose)
 1306                         device_printf(dev, "using memory mapped I/O\n");
 1307         } else {
 1308                 rid = XL_PCI_LOIO;
 1309                 res = SYS_RES_IOPORT;
 1310                 sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
 1311                 if (sc->xl_res == NULL) {
 1312                         device_printf(dev, "couldn't map ports/memory\n");
 1313                         error = ENXIO;
 1314                         goto fail;
 1315                 }
 1316                 if (bootverbose)
 1317                         device_printf(dev, "using port I/O\n");
 1318         }
 1319 
 1320         sc->xl_btag = rman_get_bustag(sc->xl_res);
 1321         sc->xl_bhandle = rman_get_bushandle(sc->xl_res);
 1322 
 1323         if (sc->xl_flags & XL_FLAG_FUNCREG) {
 1324                 rid = XL_PCI_FUNCMEM;
 1325                 sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 1326                     RF_ACTIVE);
 1327 
 1328                 if (sc->xl_fres == NULL) {
 1329                         device_printf(dev, "couldn't map funcreg memory\n");
 1330                         error = ENXIO;
 1331                         goto fail;
 1332                 }
 1333 
 1334                 sc->xl_ftag = rman_get_bustag(sc->xl_fres);
 1335                 sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
 1336         }
 1337 
 1338         /* Allocate interrupt */
 1339         rid = 0;
 1340         sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 1341             RF_SHAREABLE | RF_ACTIVE);
 1342         if (sc->xl_irq == NULL) {
 1343                 device_printf(dev, "couldn't map interrupt\n");
 1344                 error = ENXIO;
 1345                 goto fail;
 1346         }
 1347 
 1348         /* Initialize interface name. */
 1349         ifp = sc->xl_ifp = if_alloc(IFT_ETHER);
 1350         if (ifp == NULL) {
 1351                 device_printf(dev, "cannot if_alloc()\n");
 1352                 error = ENOSPC;
 1353                 goto fail;
 1354         }
 1355         ifp->if_softc = sc;
 1356         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1357 
 1358         /* Reset the adapter. */
 1359         XL_LOCK(sc);
 1360         xl_reset(sc);
 1361         XL_UNLOCK(sc);
 1362 
 1363         /*
 1364          * Get station address from the EEPROM.
 1365          */
 1366         if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
 1367                 device_printf(dev, "failed to read station address\n");
 1368                 error = ENXIO;
 1369                 goto fail;
 1370         }
 1371 
 1372         sc->xl_unit = unit;
 1373         callout_init_mtx(&sc->xl_stat_callout, &sc->xl_mtx, 0);
 1374         TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
 1375 
 1376         /*
 1377          * Now allocate a tag for the DMA descriptor lists and a chunk
 1378          * of DMA-able memory based on the tag.  Also obtain the DMA
 1379          * addresses of the RX and TX ring, which we'll need later.
 1380          * All of our lists are allocated as a contiguous block
 1381          * of memory.
 1382          */
 1383         error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 1384             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1385             XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
 1386             &sc->xl_ldata.xl_rx_tag);
 1387         if (error) {
 1388                 device_printf(dev, "failed to allocate rx dma tag\n");
 1389                 goto fail;
 1390         }
 1391 
 1392         error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
 1393             (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 1394             &sc->xl_ldata.xl_rx_dmamap);
 1395         if (error) {
 1396                 device_printf(dev, "no memory for rx list buffers!\n");
 1397                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1398                 sc->xl_ldata.xl_rx_tag = NULL;
 1399                 goto fail;
 1400         }
 1401 
 1402         error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
 1403             sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
 1404             XL_RX_LIST_SZ, xl_dma_map_addr,
 1405             &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
 1406         if (error) {
 1407                 device_printf(dev, "cannot get dma address of the rx ring!\n");
 1408                 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
 1409                     sc->xl_ldata.xl_rx_dmamap);
 1410                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1411                 sc->xl_ldata.xl_rx_tag = NULL;
 1412                 goto fail;
 1413         }
 1414 
 1415         error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
 1416             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1417             XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
 1418             &sc->xl_ldata.xl_tx_tag);
 1419         if (error) {
 1420                 device_printf(dev, "failed to allocate tx dma tag\n");
 1421                 goto fail;
 1422         }
 1423 
 1424         error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
 1425             (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 1426             &sc->xl_ldata.xl_tx_dmamap);
 1427         if (error) {
 1428                 device_printf(dev, "no memory for list buffers!\n");
 1429                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1430                 sc->xl_ldata.xl_tx_tag = NULL;
 1431                 goto fail;
 1432         }
 1433 
 1434         error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
 1435             sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
 1436             XL_TX_LIST_SZ, xl_dma_map_addr,
 1437             &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
 1438         if (error) {
 1439                 device_printf(dev, "cannot get dma address of the tx ring!\n");
 1440                 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
 1441                     sc->xl_ldata.xl_tx_dmamap);
 1442                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1443                 sc->xl_ldata.xl_tx_tag = NULL;
 1444                 goto fail;
 1445         }
 1446 
 1447         /*
 1448          * Allocate a DMA tag for the mapping of mbufs.
 1449          */
 1450         error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 1451             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1452             MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
 1453             NULL, &sc->xl_mtag);
 1454         if (error) {
 1455                 device_printf(dev, "failed to allocate mbuf dma tag\n");
 1456                 goto fail;
 1457         }
 1458 
 1459         /* We need a spare DMA map for the RX ring. */
 1460         error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
 1461         if (error)
 1462                 goto fail;
 1463 
 1464         /*
 1465          * Figure out the card type. 3c905B adapters have the
 1466          * 'supportsNoTxLength' bit set in the capabilities
 1467          * word in the EEPROM.
 1468          * Note: my 3c575C cardbus card lies. It returns a value
 1469          * of 0x1578 for its capabilities word, which is somewhat
 1470          * nonsensical. Another way to distinguish a 3c90x chip
 1471          * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
 1472          * bit. This will only be set for 3c90x boomerang chips.
 1473          */
 1474         xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
 1475         if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
 1476             !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
 1477                 sc->xl_type = XL_TYPE_905B;
 1478         else
 1479                 sc->xl_type = XL_TYPE_90X;
 1480 
 1481         /* Set the TX start threshold for best performance. */
 1482         sc->xl_tx_thresh = XL_MIN_FRAMELEN;
 1483 
 1484         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1485         ifp->if_ioctl = xl_ioctl;
 1486         ifp->if_capabilities = IFCAP_VLAN_MTU;
 1487         if (sc->xl_type == XL_TYPE_905B) {
 1488                 ifp->if_hwassist = XL905B_CSUM_FEATURES;
 1489 #ifdef XL905B_TXCSUM_BROKEN
 1490                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1491 #else
 1492                 ifp->if_capabilities |= IFCAP_HWCSUM;
 1493 #endif
 1494         }
 1495         ifp->if_capenable = ifp->if_capabilities;
 1496 #ifdef DEVICE_POLLING
 1497         ifp->if_capabilities |= IFCAP_POLLING;
 1498 #endif
 1499         ifp->if_start = xl_start;
 1500         ifp->if_init = xl_init;
 1501         IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
 1502         ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1;
 1503         IFQ_SET_READY(&ifp->if_snd);
 1504 
 1505         /*
 1506          * Now we have to see what sort of media we have.
 1507          * This includes probing for an MII interface and a
 1508          * possible PHY.
 1509          */
 1510         XL_SEL_WIN(3);
 1511         sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
 1512         if (bootverbose)
 1513                 device_printf(dev, "media options word: %x\n", sc->xl_media);
 1514 
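              /*
               * Read the two internal-config words from the EEPROM and
               * extract the default connector (transceiver) selection
               * from them.
               */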
 1515         xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
 1516         sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
 1517         sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
 1518         sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
 1519 
 1520         xl_mediacheck(sc);
 1521 
 1522         if (sc->xl_media & XL_MEDIAOPT_MII ||
 1523             sc->xl_media & XL_MEDIAOPT_BTX ||
 1524             sc->xl_media & XL_MEDIAOPT_BT4) {
 1525                 if (bootverbose)
 1526                         device_printf(dev, "found MII/AUTO\n");
 1527                 xl_setcfg(sc);
 1528                 if (mii_phy_probe(dev, &sc->xl_miibus,
 1529                     xl_ifmedia_upd, xl_ifmedia_sts)) {
 1530                         device_printf(dev, "no PHY found!\n");
 1531                         error = ENXIO;
 1532                         goto fail;
 1533                 }
 1534                 goto done;
 1535         }
 1536 
 1537         /*
 1538          * Sanity check. If the user has selected "auto" and this isn't
 1539          * a 10/100 card of some kind, we need to force the transceiver
 1540          * type to something sane.
 1541          */
 1542         if (sc->xl_xcvr == XL_XCVR_AUTO)
 1543                 xl_choose_xcvr(sc, bootverbose);
 1544 
 1545         /*
 1546          * Do ifmedia setup.
 1547          */
 1548         if (sc->xl_media & XL_MEDIAOPT_BT) {
 1549                 if (bootverbose)
 1550                         device_printf(dev, "found 10baseT\n");
 1551                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
 1552                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
 1553                 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
 1554                         ifmedia_add(&sc->ifmedia,
 1555                             IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
 1556         }
 1557 
 1558         if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
 1559                 /*
 1560                  * Check for a 10baseFL board in disguise.
 1561                  */
 1562                 if (sc->xl_type == XL_TYPE_905B &&
 1563                     sc->xl_media == XL_MEDIAOPT_10FL) {
 1564                         if (bootverbose)
 1565                                 device_printf(dev, "found 10baseFL\n");
 1566                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
 1567                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
 1568                             0, NULL);
 1569                         if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
 1570                                 ifmedia_add(&sc->ifmedia,
 1571                                     IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
 1572                 } else {
 1573                         if (bootverbose)
 1574                                 device_printf(dev, "found AUI\n");
 1575                         ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
 1576                 }
 1577         }
 1578 
 1579         if (sc->xl_media & XL_MEDIAOPT_BNC) {
 1580                 if (bootverbose)
 1581                         device_printf(dev, "found BNC\n");
 1582                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
 1583         }
 1584 
 1585         if (sc->xl_media & XL_MEDIAOPT_BFX) {
 1586                 if (bootverbose)
 1587                         device_printf(dev, "found 100baseFX\n");
 1588                 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
 1589         }
 1590 
 1591         media = IFM_ETHER|IFM_100_TX|IFM_FDX;
 1592         xl_choose_media(sc, &media);
 1593 
 1594         if (sc->xl_miibus == NULL)
 1595                 ifmedia_set(&sc->ifmedia, media);
 1596 
 1597 done:
 1598         if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
 1599                 XL_SEL_WIN(0);
 1600                 CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
 1601         }
 1602 
 1603         /*
 1604          * Call MI attach routine.
 1605          */
 1606         ether_ifattach(ifp, eaddr);
 1607 
 1608         error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE,
 1609             xl_intr, sc, &sc->xl_intrhand);
 1610         if (error) {
 1611                 device_printf(dev, "couldn't set up irq\n");
 1612                 ether_ifdetach(ifp);
 1613                 goto fail;
 1614         }
 1615 
 1616 fail:
 1617         if (error)
 1618                 xl_detach(dev);
 1619 
 1620         return (error);
 1621 }
 1622 
 1623 /*
 1624  * Choose a default media.
 1625  * XXX This is a leaf function only called by xl_attach() and
 1626  *     acquires/releases the non-recursible driver mutex to
 1627  *     acquires/releases the non-recursive driver mutex to
 1628  */
 1629 static void
 1630 xl_choose_media(struct xl_softc *sc, int *media)
 1631 {
 1632 
 1633         XL_LOCK(sc);
 1634 
 1635         switch (sc->xl_xcvr) {
 1636         case XL_XCVR_10BT:
 1637                 *media = IFM_ETHER|IFM_10_T;
 1638                 xl_setmode(sc, *media);
 1639                 break;
 1640         case XL_XCVR_AUI:
 1641                 if (sc->xl_type == XL_TYPE_905B &&
 1642                     sc->xl_media == XL_MEDIAOPT_10FL) {
 1643                         *media = IFM_ETHER|IFM_10_FL;
 1644                         xl_setmode(sc, *media);
 1645                 } else {
 1646                         *media = IFM_ETHER|IFM_10_5;
 1647                         xl_setmode(sc, *media);
 1648                 }
 1649                 break;
 1650         case XL_XCVR_COAX:
 1651                 *media = IFM_ETHER|IFM_10_2;
 1652                 xl_setmode(sc, *media);
 1653                 break;
 1654         case XL_XCVR_AUTO:
 1655         case XL_XCVR_100BTX:
 1656         case XL_XCVR_MII:
 1657                 /* Chosen by miibus */
 1658                 break;
 1659         case XL_XCVR_100BFX:
 1660                 *media = IFM_ETHER|IFM_100_FX;
 1661                 break;
 1662         default:
 1663                 device_printf(sc->xl_dev, "unknown XCVR type: %d\n",
 1664                     sc->xl_xcvr);
 1665                 /*
 1666                  * This will probably be wrong, but it prevents
 1667                  * the ifmedia code from panicking.
 1668                  */
 1669                 *media = IFM_ETHER|IFM_10_T;
 1670                 break;
 1671         }
 1672 
 1673         XL_UNLOCK(sc);
 1674 }
 1675 
 1676 /*
 1677  * Shutdown hardware and free up resources. This can be called any
 1678  * time after the mutex has been initialized. It is called in both
 1679  * the error case in attach and the normal detach case so it needs
 1680  * to be careful about only freeing resources that have actually been
 1681  * allocated.
 1682  */
 1683 static int
 1684 xl_detach(device_t dev)
 1685 {
 1686         struct xl_softc         *sc;
 1687         struct ifnet            *ifp;
 1688         int                     rid, res;
 1689 
 1690         sc = device_get_softc(dev);
 1691         ifp = sc->xl_ifp;
 1692 
 1693         KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
 1694 
 1695 #ifdef DEVICE_POLLING
 1696         if (ifp && ifp->if_capenable & IFCAP_POLLING)
 1697                 ether_poll_deregister(ifp);
 1698 #endif
 1699 
 1700         if (sc->xl_flags & XL_FLAG_USE_MMIO) {
 1701                 rid = XL_PCI_LOMEM;
 1702                 res = SYS_RES_MEMORY;
 1703         } else {
 1704                 rid = XL_PCI_LOIO;
 1705                 res = SYS_RES_IOPORT;
 1706         }
 1707 
 1708         /* These should only be active if attach succeeded */
 1709         if (device_is_attached(dev)) {
 1710                 XL_LOCK(sc);
 1711                 xl_reset(sc);
 1712                 xl_stop(sc);
 1713                 XL_UNLOCK(sc);
 1714                 taskqueue_drain(taskqueue_swi, &sc->xl_task);
 1715                 callout_drain(&sc->xl_stat_callout);
 1716                 ether_ifdetach(ifp);
 1717         }
 1718         if (sc->xl_miibus)
 1719                 device_delete_child(dev, sc->xl_miibus);
 1720         bus_generic_detach(dev);
 1721         ifmedia_removeall(&sc->ifmedia);
 1722 
 1723         if (sc->xl_intrhand)
 1724                 bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
 1725         if (sc->xl_irq)
 1726                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
 1727         if (sc->xl_fres != NULL)
 1728                 bus_release_resource(dev, SYS_RES_MEMORY,
 1729                     XL_PCI_FUNCMEM, sc->xl_fres);
 1730         if (sc->xl_res)
 1731                 bus_release_resource(dev, res, rid, sc->xl_res);
 1732 
 1733         if (ifp)
 1734                 if_free(ifp);
 1735 
 1736         if (sc->xl_mtag) {
 1737                 bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
 1738                 bus_dma_tag_destroy(sc->xl_mtag);
 1739         }
 1740         if (sc->xl_ldata.xl_rx_tag) {
 1741                 bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
 1742                     sc->xl_ldata.xl_rx_dmamap);
 1743                 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
 1744                     sc->xl_ldata.xl_rx_dmamap);
 1745                 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
 1746         }
 1747         if (sc->xl_ldata.xl_tx_tag) {
 1748                 bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
 1749                     sc->xl_ldata.xl_tx_dmamap);
 1750                 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
 1751                     sc->xl_ldata.xl_tx_dmamap);
 1752                 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
 1753         }
 1754 
 1755         mtx_destroy(&sc->xl_mtx);
 1756 
 1757         return (0);
 1758 }
 1759 
 1760 /*
 1761  * Initialize the transmit descriptors.
 1762  */
 1763 static int
 1764 xl_list_tx_init(struct xl_softc *sc)
 1765 {
 1766         struct xl_chain_data    *cd;
 1767         struct xl_list_data     *ld;
 1768         int                     error, i;
 1769 
 1770         XL_LOCK_ASSERT(sc);
 1771 
 1772         cd = &sc->xl_cdata;
 1773         ld = &sc->xl_ldata;
 1774         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 1775                 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
 1776                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1777                     &cd->xl_tx_chain[i].xl_map);
 1778                 if (error)
 1779                         return (error);
 1780                 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
 1781                     i * sizeof(struct xl_list);
 1782                 if (i == (XL_TX_LIST_CNT - 1))
 1783                         cd->xl_tx_chain[i].xl_next = NULL;
 1784                 else
 1785                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
 1786         }
 1787 
 1788         cd->xl_tx_free = &cd->xl_tx_chain[0];
 1789         cd->xl_tx_tail = cd->xl_tx_head = NULL;
 1790 
 1791         bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
 1792         return (0);
 1793 }
 1794 
 1795 /*
 1796  * Initialize the transmit descriptors for the 3c90xB, which uses a circular descriptor ring.
 1797  */
 1798 static int
 1799 xl_list_tx_init_90xB(struct xl_softc *sc)
 1800 {
 1801         struct xl_chain_data    *cd;
 1802         struct xl_list_data     *ld;
 1803         int                     error, i;
 1804 
 1805         XL_LOCK_ASSERT(sc);
 1806 
 1807         cd = &sc->xl_cdata;
 1808         ld = &sc->xl_ldata;
 1809         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 1810                 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
 1811                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1812                     &cd->xl_tx_chain[i].xl_map);
 1813                 if (error)
 1814                         return (error);
 1815                 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
 1816                     i * sizeof(struct xl_list);
 1817                 if (i == (XL_TX_LIST_CNT - 1))
 1818                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
 1819                 else
 1820                         cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
 1821                 if (i == 0)
 1822                         cd->xl_tx_chain[i].xl_prev =
 1823                             &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
 1824                 else
 1825                         cd->xl_tx_chain[i].xl_prev =
 1826                             &cd->xl_tx_chain[i - 1];
 1827         }
 1828 
 1829         bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
 1830         ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
 1831 
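              /*
               * Slot 0 has just been marked empty; the producer and
               * consumer indices both start at slot 1, with no
               * descriptors outstanding.
               */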
 1832         cd->xl_tx_prod = 1;
 1833         cd->xl_tx_cons = 1;
 1834         cd->xl_tx_cnt = 0;
 1835 
 1836         bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
 1837         return (0);
 1838 }
 1839 
 1840 /*
 1841  * Initialize the RX descriptors and allocate mbufs for them. Note that
 1842  * we arrange the descriptors in a closed ring, so that the last descriptor
 1843  * points back to the first.
 1844  */
 1845 static int
 1846 xl_list_rx_init(struct xl_softc *sc)
 1847 {
 1848         struct xl_chain_data    *cd;
 1849         struct xl_list_data     *ld;
 1850         int                     error, i, next;
 1851         u_int32_t               nextptr;
 1852 
 1853         XL_LOCK_ASSERT(sc);
 1854 
 1855         cd = &sc->xl_cdata;
 1856         ld = &sc->xl_ldata;
 1857 
 1858         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 1859                 cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
 1860                 error = bus_dmamap_create(sc->xl_mtag, 0,
 1861                     &cd->xl_rx_chain[i].xl_map);
 1862                 if (error)
 1863                         return (error);
 1864                 error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
 1865                 if (error)
 1866                         return (error);
 1867                 if (i == (XL_RX_LIST_CNT - 1))
 1868                         next = 0;
 1869                 else
 1870                         next = i + 1;
 1871                 nextptr = ld->xl_rx_dmaaddr +
 1872                     next * sizeof(struct xl_list_onefrag);
 1873                 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
 1874                 ld->xl_rx_list[i].xl_next = htole32(nextptr);
 1875         }
 1876 
 1877         bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 1878         cd->xl_rx_head = &cd->xl_rx_chain[0];
 1879 
 1880         return (0);
 1881 }
 1882 
 1883 /*
 1884  * Initialize an RX descriptor and attach an mbuf cluster.
 1885  * If we fail to do so, we need to leave the old mbuf and
 1886  * the old DMA map untouched so that they can be reused.
 1887  */
 1888 static int
 1889 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
 1890 {
 1891         struct mbuf             *m_new = NULL;
 1892         bus_dmamap_t            map;
 1893         int                     error;
 1894         u_int32_t               baddr;
 1895 
 1896         XL_LOCK_ASSERT(sc);
 1897 
 1898         m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 1899         if (m_new == NULL)
 1900                 return (ENOBUFS);
 1901 
 1902         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
 1903 
 1904         /* Force longword alignment for packet payload. */
 1905         m_adj(m_new, ETHER_ALIGN);
 1906 
 1907         error = bus_dmamap_load_mbuf(sc->xl_mtag, sc->xl_tmpmap, m_new,
 1908             xl_dma_map_rxbuf, &baddr, BUS_DMA_NOWAIT);
 1909         if (error) {
 1910                 m_freem(m_new);
 1911                 device_printf(sc->xl_dev, "can't map mbuf (error %d)\n",
 1912                     error);
 1913                 return (error);
 1914         }
 1915 
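              /*
               * The new mbuf was loaded into the spare map above.  Unload
               * the descriptor's old map and swap the two, so the old map
               * becomes the new spare.
               */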
 1916         bus_dmamap_unload(sc->xl_mtag, c->xl_map);
 1917         map = c->xl_map;
 1918         c->xl_map = sc->xl_tmpmap;
 1919         sc->xl_tmpmap = map;
 1920         c->xl_mbuf = m_new;
 1921         c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
 1922         c->xl_ptr->xl_status = 0;
 1923         c->xl_ptr->xl_frag.xl_addr = htole32(baddr);
 1924         bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
 1925         return (0);
 1926 }
 1927 
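      /*
       * Walk the RX ring starting at the current head looking for a
       * descriptor whose status word is non-zero.  If one is found, move
       * the head pointer to it and return EAGAIN so the caller will run
       * xl_rxeof() again; otherwise return 0.
       */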
 1928 static int
 1929 xl_rx_resync(struct xl_softc *sc)
 1930 {
 1931         struct xl_chain_onefrag *pos;
 1932         int                     i;
 1933 
 1934         XL_LOCK_ASSERT(sc);
 1935 
 1936         pos = sc->xl_cdata.xl_rx_head;
 1937 
 1938         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 1939                 if (pos->xl_ptr->xl_status)
 1940                         break;
 1941                 pos = pos->xl_next;
 1942         }
 1943 
 1944         if (i == XL_RX_LIST_CNT)
 1945                 return (0);
 1946 
 1947         sc->xl_cdata.xl_rx_head = pos;
 1948 
 1949         return (EAGAIN);
 1950 }
 1951 
 1952 /*
 1953  * A frame has been uploaded: pass the resulting mbuf chain up to
 1954  * the higher level protocols.
 1955  */
 1956 static void
 1957 xl_rxeof(struct xl_softc *sc)
 1958 {
 1959         struct mbuf             *m;
 1960         struct ifnet            *ifp = sc->xl_ifp;
 1961         struct xl_chain_onefrag *cur_rx;
 1962         int                     total_len = 0;
 1963         u_int32_t               rxstat;
 1964 
 1965         XL_LOCK_ASSERT(sc);
 1966 again:
 1967         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
 1968             BUS_DMASYNC_POSTREAD);
 1969         while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
 1970 #ifdef DEVICE_POLLING
 1971                 if (ifp->if_capenable & IFCAP_POLLING) {
 1972                         if (sc->rxcycles <= 0)
 1973                                 break;
 1974                         sc->rxcycles--;
 1975                 }
 1976 #endif
 1977                 cur_rx = sc->xl_cdata.xl_rx_head;
 1978                 sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
 1979                 total_len = rxstat & XL_RXSTAT_LENMASK;
 1980 
 1981                 /*
 1982                  * Since we have told the chip to allow large frames,
 1983                  * we need to trap giant frame errors in software. We allow
 1984                  * a little more than the normal frame size to account for
 1985                  * frames with VLAN tags.
 1986                  */
 1987                 if (total_len > XL_MAX_FRAMELEN)
 1988                         rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
 1989 
 1990                 /*
 1991                  * If an error occurs, update stats, clear the
 1992                  * status word and leave the mbuf cluster in place:
 1993                  * it should simply get re-used next time this descriptor
 1994                  * comes up in the ring.
 1995                  */
 1996                 if (rxstat & XL_RXSTAT_UP_ERROR) {
 1997                         ifp->if_ierrors++;
 1998                         cur_rx->xl_ptr->xl_status = 0;
 1999                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 2000                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 2001                         continue;
 2002                 }
 2003 
 2004                 /*
 2005                  * If the error bit was not set, the upload complete
 2006                  * bit should be set which means we have a valid packet.
 2007                  * If not, something truly strange has happened.
 2008                  */
 2009                 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
 2010                         device_printf(sc->xl_dev,
 2011                             "bad receive status -- packet dropped\n");
 2012                         ifp->if_ierrors++;
 2013                         cur_rx->xl_ptr->xl_status = 0;
 2014                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 2015                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 2016                         continue;
 2017                 }
 2018 
 2019                 /* No errors; receive the packet. */
 2020                 bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
 2021                     BUS_DMASYNC_POSTREAD);
 2022                 m = cur_rx->xl_mbuf;
 2023 
 2024                 /*
 2025                  * Try to conjure up a new mbuf cluster. If that
 2026                  * fails, it means we have an out of memory condition and
 2027                  * should leave the buffer in place and continue. This will
 2028                  * result in a lost packet, but there's little else we
 2029                  * can do in this situation.
 2030                  */
 2031                 if (xl_newbuf(sc, cur_rx)) {
 2032                         ifp->if_ierrors++;
 2033                         cur_rx->xl_ptr->xl_status = 0;
 2034                         bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 2035                             sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 2036                         continue;
 2037                 }
 2038                 bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
 2039                     sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
 2040 
 2041                 ifp->if_ipackets++;
 2042                 m->m_pkthdr.rcvif = ifp;
 2043                 m->m_pkthdr.len = m->m_len = total_len;
 2044 
 2045                 if (ifp->if_capenable & IFCAP_RXCSUM) {
 2046                         /* Do IP checksum checking. */
 2047                         if (rxstat & XL_RXSTAT_IPCKOK)
 2048                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 2049                         if (!(rxstat & XL_RXSTAT_IPCKERR))
 2050                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 2051                         if ((rxstat & XL_RXSTAT_TCPCOK &&
 2052                              !(rxstat & XL_RXSTAT_TCPCKERR)) ||
 2053                             (rxstat & XL_RXSTAT_UDPCKOK &&
 2054                              !(rxstat & XL_RXSTAT_UDPCKERR))) {
 2055                                 m->m_pkthdr.csum_flags |=
 2056                                         CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
 2057                                 m->m_pkthdr.csum_data = 0xffff;
 2058                         }
 2059                 }
 2060 
 2061                 XL_UNLOCK(sc);
 2062                 (*ifp->if_input)(ifp, m);
 2063                 XL_LOCK(sc);
 2064 
 2065                 /*
 2066                  * If we are running from the taskqueue, the interface
 2067                  * might have been stopped while we were passing the last
 2068                  * packet up the network stack.
 2069                  */
 2070                 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 2071                         return;
 2072         }
 2073 
 2074         /*
 2075          * Handle the 'end of channel' condition. When the upload
 2076          * engine hits the end of the RX ring, it will stall. This
 2077          * is our cue to flush the RX ring, reload the uplist pointer
 2078          * register and unstall the engine.
 2079          * XXX This is actually a little goofy. With the ThunderLAN
 2080          * chip, you get an interrupt when the receiver hits the end
 2081          * of the receive ring, which tells you exactly when you
 2082          * need to reload the ring pointer. Here we have to
 2083          * fake it. I'm mad at myself for not being clever enough
 2084          * to avoid the use of a goto here.
 2085          */
 2086         if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
 2087                 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
 2088                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 2089                 xl_wait(sc);
 2090                 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
 2091                 sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
 2092                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 2093                 goto again;
 2094         }
 2095 }
 2096 
 2097 /*
 2098  * Taskqueue wrapper for xl_rxeof().
 2099  */
 2100 static void
 2101 xl_rxeof_task(void *arg, int pending)
 2102 {
 2103         struct xl_softc *sc = (struct xl_softc *)arg;
 2104 
 2105         NET_LOCK_GIANT();
 2106         XL_LOCK(sc);
 2107         if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING)
 2108                 xl_rxeof(sc);
 2109         XL_UNLOCK(sc);
 2110         NET_UNLOCK_GIANT();
 2111 }
 2112 
 2113 /*
 2114  * A frame was downloaded to the chip. It's safe for us to clean up
 2115  * the list buffers.
 2116  */
 2117 static void
 2118 xl_txeof(struct xl_softc *sc)
 2119 {
 2120         struct xl_chain         *cur_tx;
 2121         struct ifnet            *ifp = sc->xl_ifp;
 2122 
 2123         XL_LOCK_ASSERT(sc);
 2124 
 2125         /*
 2126          * Go through our tx list and free mbufs for those
 2127          * frames that have been downloaded. Note: the 3c905B
 2128          * sets a special bit in the status word to let us
 2129          * know that a frame has been downloaded, but the
 2130          * original 3c900/3c905 adapters don't do that.
 2131          * Consequently, we have to use a different test if
 2132          * xl_type != XL_TYPE_905B.
 2133          */
 2134         while (sc->xl_cdata.xl_tx_head != NULL) {
 2135                 cur_tx = sc->xl_cdata.xl_tx_head;
 2136 
 2137                 if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
 2138                         break;
 2139 
 2140                 sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
 2141                 bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
 2142                     BUS_DMASYNC_POSTWRITE);
 2143                 bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
 2144                 m_freem(cur_tx->xl_mbuf);
 2145                 cur_tx->xl_mbuf = NULL;
 2146                 ifp->if_opackets++;
 2147 
 2148                 cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
 2149                 sc->xl_cdata.xl_tx_free = cur_tx;
 2150         }
 2151 
 2152         if (sc->xl_cdata.xl_tx_head == NULL) {
 2153                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2154                 sc->xl_wdog_timer = 0;
 2155                 sc->xl_cdata.xl_tx_tail = NULL;
 2156         } else {
 2157                 if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
 2158                         !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
 2159                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2160                                 sc->xl_cdata.xl_tx_head->xl_phys);
 2161                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2162                 }
 2163         }
 2164 }
 2165 
 2166 static void
 2167 xl_txeof_90xB(struct xl_softc *sc)
 2168 {
 2169         struct xl_chain         *cur_tx = NULL;
 2170         struct ifnet            *ifp = sc->xl_ifp;
 2171         int                     idx;
 2172 
 2173         XL_LOCK_ASSERT(sc);
 2174 
 2175         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2176             BUS_DMASYNC_POSTREAD);
 2177         idx = sc->xl_cdata.xl_tx_cons;
 2178         while (idx != sc->xl_cdata.xl_tx_prod) {
 2179 
 2180                 cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2181 
 2182                 if (!(le32toh(cur_tx->xl_ptr->xl_status) &
 2183                       XL_TXSTAT_DL_COMPLETE))
 2184                         break;
 2185 
 2186                 if (cur_tx->xl_mbuf != NULL) {
 2187                         bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
 2188                             BUS_DMASYNC_POSTWRITE);
 2189                         bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
 2190                         m_freem(cur_tx->xl_mbuf);
 2191                         cur_tx->xl_mbuf = NULL;
 2192                 }
 2193 
 2194                 ifp->if_opackets++;
 2195 
 2196                 sc->xl_cdata.xl_tx_cnt--;
 2197                 XL_INC(idx, XL_TX_LIST_CNT);
 2198         }
 2199 
 2200         if (sc->xl_cdata.xl_tx_cnt == 0)
 2201                 sc->xl_wdog_timer = 0;
 2202         sc->xl_cdata.xl_tx_cons = idx;
 2203 
 2204         if (cur_tx != NULL)
 2205                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 2206 }
 2207 
 2208 /*
 2209  * TX 'end of channel' interrupt handler. Actually, we should
 2210  * only get a 'TX complete' interrupt if there's a transmit error,
 2211  * so this is really the TX error handler.
 2212  */
 2213 static void
 2214 xl_txeoc(struct xl_softc *sc)
 2215 {
 2216         u_int8_t                txstat;
 2217 
 2218         XL_LOCK_ASSERT(sc);
 2219 
 2220         while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
 2221                 if (txstat & XL_TXSTATUS_UNDERRUN ||
 2222                         txstat & XL_TXSTATUS_JABBER ||
 2223                         txstat & XL_TXSTATUS_RECLAIM) {
 2224                         device_printf(sc->xl_dev,
 2225                             "transmission error: %x\n", txstat);
 2226                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2227                         xl_wait(sc);
 2228                         if (sc->xl_type == XL_TYPE_905B) {
 2229                                 if (sc->xl_cdata.xl_tx_cnt) {
 2230                                         int                     i;
 2231                                         struct xl_chain         *c;
 2232 
 2233                                         i = sc->xl_cdata.xl_tx_cons;
 2234                                         c = &sc->xl_cdata.xl_tx_chain[i];
 2235                                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2236                                             c->xl_phys);
 2237                                         CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
 2238                                 }
 2239                         } else {
 2240                                 if (sc->xl_cdata.xl_tx_head != NULL)
 2241                                         CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2242                                             sc->xl_cdata.xl_tx_head->xl_phys);
 2243                         }
 2244                         /*
 2245                          * Remember to set this for the
 2246                          * first generation 3c90X chips.
 2247                          */
 2248                         CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
 2249                         if (txstat & XL_TXSTATUS_UNDERRUN &&
 2250                             sc->xl_tx_thresh < XL_PACKET_SIZE) {
 2251                                 sc->xl_tx_thresh += XL_MIN_FRAMELEN;
 2252                                 device_printf(sc->xl_dev,
 2253 "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh);
 2254                         }
 2255                         CSR_WRITE_2(sc, XL_COMMAND,
 2256                             XL_CMD_TX_SET_START|sc->xl_tx_thresh);
 2257                         if (sc->xl_type == XL_TYPE_905B) {
 2258                                 CSR_WRITE_2(sc, XL_COMMAND,
 2259                                 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
 2260                         }
 2261                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2262                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2263                 } else {
 2264                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2265                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2266                 }
 2267                 /*
 2268                  * Write an arbitrary byte to the TX_STATUS register
 2269                  * to clear this interrupt/error and advance to the next.
 2270                  */
 2271                 CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
 2272         }
 2273 }
 2274 
 2275 static void
 2276 xl_intr(void *arg)
 2277 {
 2278         struct xl_softc         *sc = arg;
 2279         struct ifnet            *ifp = sc->xl_ifp;
 2280         u_int16_t               status;
 2281 
 2282         XL_LOCK(sc);
 2283 
 2284 #ifdef DEVICE_POLLING
 2285         if (ifp->if_capenable & IFCAP_POLLING) {
 2286                 XL_UNLOCK(sc);
 2287                 return;
 2288         }
 2289 #endif
 2290 
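              /*
               * Service and acknowledge interrupt sources until none of
               * the bits we care about remain set.  A status of 0xFFFF
               * most likely means the hardware is gone (e.g. an ejected
               * CardBus card), so bail out in that case as well.
               */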
 2291         while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS &&
 2292             status != 0xFFFF) {
 2293                 CSR_WRITE_2(sc, XL_COMMAND,
 2294                     XL_CMD_INTR_ACK|(status & XL_INTRS));
 2295 
 2296                 if (status & XL_STAT_UP_COMPLETE) {
 2297                         int     curpkts;
 2298 
 2299                         curpkts = ifp->if_ipackets;
 2300                         xl_rxeof(sc);
 2301                         if (curpkts == ifp->if_ipackets) {
 2302                                 while (xl_rx_resync(sc))
 2303                                         xl_rxeof(sc);
 2304                         }
 2305                 }
 2306 
 2307                 if (status & XL_STAT_DOWN_COMPLETE) {
 2308                         if (sc->xl_type == XL_TYPE_905B)
 2309                                 xl_txeof_90xB(sc);
 2310                         else
 2311                                 xl_txeof(sc);
 2312                 }
 2313 
 2314                 if (status & XL_STAT_TX_COMPLETE) {
 2315                         ifp->if_oerrors++;
 2316                         xl_txeoc(sc);
 2317                 }
 2318 
 2319                 if (status & XL_STAT_ADFAIL) {
 2320                         xl_reset(sc);
 2321                         xl_init_locked(sc);
 2322                 }
 2323 
 2324                 if (status & XL_STAT_STATSOFLOW) {
 2325                         sc->xl_stats_no_timeout = 1;
 2326                         xl_stats_update_locked(sc);
 2327                         sc->xl_stats_no_timeout = 0;
 2328                 }
 2329         }
 2330 
 2331         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 2332                 if (sc->xl_type == XL_TYPE_905B)
 2333                         xl_start_90xB_locked(ifp);
 2334                 else
 2335                         xl_start_locked(ifp);
 2336         }
 2337 
 2338         XL_UNLOCK(sc);
 2339 }
 2340 
 2341 #ifdef DEVICE_POLLING
 2342 static void
 2343 xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2344 {
 2345         struct xl_softc *sc = ifp->if_softc;
 2346 
 2347         XL_LOCK(sc);
 2348         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 2349                 xl_poll_locked(ifp, cmd, count);
 2350         XL_UNLOCK(sc);
 2351 }
 2352 
 2353 static void
 2354 xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
 2355 {
 2356         struct xl_softc *sc = ifp->if_softc;
 2357 
 2358         XL_LOCK_ASSERT(sc);
 2359 
 2360         sc->rxcycles = count;
 2361         xl_rxeof(sc);
 2362         if (sc->xl_type == XL_TYPE_905B)
 2363                 xl_txeof_90xB(sc);
 2364         else
 2365                 xl_txeof(sc);
 2366 
 2367         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 2368                 if (sc->xl_type == XL_TYPE_905B)
 2369                         xl_start_90xB_locked(ifp);
 2370                 else
 2371                         xl_start_locked(ifp);
 2372         }
 2373 
 2374         if (cmd == POLL_AND_CHECK_STATUS) {
 2375                 u_int16_t status;
 2376 
 2377                 status = CSR_READ_2(sc, XL_STATUS);
 2378                 if (status & XL_INTRS && status != 0xFFFF) {
 2379                         CSR_WRITE_2(sc, XL_COMMAND,
 2380                             XL_CMD_INTR_ACK|(status & XL_INTRS));
 2381 
 2382                         if (status & XL_STAT_TX_COMPLETE) {
 2383                                 ifp->if_oerrors++;
 2384                                 xl_txeoc(sc);
 2385                         }
 2386 
 2387                         if (status & XL_STAT_ADFAIL) {
 2388                                 xl_reset(sc);
 2389                                 xl_init_locked(sc);
 2390                         }
 2391 
 2392                         if (status & XL_STAT_STATSOFLOW) {
 2393                                 sc->xl_stats_no_timeout = 1;
 2394                                 xl_stats_update_locked(sc);
 2395                                 sc->xl_stats_no_timeout = 0;
 2396                         }
 2397                 }
 2398         }
 2399 }
 2400 #endif /* DEVICE_POLLING */
 2401 
 2402 /*
 2403  * XXX: This is an entry point for callout which needs to take the lock.
 2404  */
 2405 static void
 2406 xl_stats_update(void *xsc)
 2407 {
 2408         struct xl_softc *sc = xsc;
 2409 
 2410         XL_LOCK_ASSERT(sc);
 2411 
 2412         if (xl_watchdog(sc) == EJUSTRETURN)
 2413                 return;
 2414 
 2415         xl_stats_update_locked(sc);
 2416 }
 2417 
 2418 static void
 2419 xl_stats_update_locked(struct xl_softc *sc)
 2420 {
 2421         struct ifnet            *ifp = sc->xl_ifp;
 2422         struct xl_stats         xl_stats;
 2423         u_int8_t                *p;
 2424         int                     i;
 2425         struct mii_data         *mii = NULL;
 2426 
 2427         XL_LOCK_ASSERT(sc);
 2428 
 2429         bzero((char *)&xl_stats, sizeof(struct xl_stats));
 2430 
 2431         if (sc->xl_miibus != NULL)
 2432                 mii = device_get_softc(sc->xl_miibus);
 2433 
 2434         p = (u_int8_t *)&xl_stats;
 2435 
 2436         /* Read all the stats registers. */
 2437         XL_SEL_WIN(6);
 2438 
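              /*
               * Window 6 holds 16 consecutive one-byte counters starting
               * at the carrier-lost register; copy them into the xl_stats
               * structure.  Reading the counters also clears them.
               */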
 2439         for (i = 0; i < 16; i++)
 2440                 *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
 2441 
 2442         ifp->if_ierrors += xl_stats.xl_rx_overrun;
 2443 
 2444         ifp->if_collisions += xl_stats.xl_tx_multi_collision +
 2445             xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision;
 2446 
 2447         /*
 2448          * Boomerang and cyclone chips have an extra stats counter
 2449          * in window 4 (BadSSD). We have to read this too in order
 2450          * to clear out all the stats registers and avoid a statsoflow
 2451          * interrupt.
 2452          */
 2453         XL_SEL_WIN(4);
 2454         CSR_READ_1(sc, XL_W4_BADSSD);
 2455 
 2456         if ((mii != NULL) && (!sc->xl_stats_no_timeout))
 2457                 mii_tick(mii);
 2458 
 2459         XL_SEL_WIN(7);
 2460 
 2461         if (!sc->xl_stats_no_timeout)
 2462                 callout_reset(&sc->xl_stat_callout, hz, xl_stats_update, sc);
 2463 }
 2464 
 2465 /*
 2466  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 2467  * pointers to the fragment pointers.
 2468  */
 2469 static int
 2470 xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
 2471 {
 2472         int                     error;
 2473         u_int32_t               status;
 2474         struct ifnet            *ifp = sc->xl_ifp;
 2475 
 2476         XL_LOCK_ASSERT(sc);
 2477 
 2478         /*
 2479          * Start packing the mbufs in this chain into
 2480          * the fragment pointers. Stop when we run out
 2481          * of fragments or hit the end of the mbuf chain.
 2482          */
 2483         error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map, m_head,
 2484             xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);
 2485 
 2486         if (error && error != EFBIG) {
 2487                 m_freem(m_head);
 2488                 if_printf(ifp, "can't map mbuf (error %d)\n", error);
 2489                 return (1);
 2490         }
 2491 
 2492         /*
 2493          * Handle special case: we used up all 63 fragments,
 2494          * but we have more mbufs left in the chain. Copy the
 2495          * data into an mbuf cluster. Note that we don't
 2496          * bother clearing the values in the other fragment
 2497          * pointers/counters; it wouldn't gain us anything,
 2498          * and would waste cycles.
 2499          */
 2500         if (error) {
 2501                 struct mbuf             *m_new;
 2502 
 2503                 m_new = m_defrag(m_head, M_DONTWAIT);
 2504                 if (m_new == NULL) {
 2505                         m_freem(m_head);
 2506                         return (1);
 2507                 } else {
 2508                         m_head = m_new;
 2509                 }
 2510 
 2511                 error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map,
 2512                         m_head, xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);
 2513                 if (error) {
 2514                         m_freem(m_head);
 2515                         if_printf(ifp, "can't map mbuf (error %d)\n", error);
 2516                         return (1);
 2517                 }
 2518         }
 2519 
 2520         if (sc->xl_type == XL_TYPE_905B) {
 2521                 status = XL_TXSTAT_RND_DEFEAT;
 2522 
 2523 #ifndef XL905B_TXCSUM_BROKEN
 2524                 if (m_head->m_pkthdr.csum_flags) {
 2525                         if (m_head->m_pkthdr.csum_flags & CSUM_IP)
 2526                                 status |= XL_TXSTAT_IPCKSUM;
 2527                         if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
 2528                                 status |= XL_TXSTAT_TCPCKSUM;
 2529                         if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
 2530                                 status |= XL_TXSTAT_UDPCKSUM;
 2531                 }
 2532 #endif
 2533                 c->xl_ptr->xl_status = htole32(status);
 2534         }
 2535 
 2536         c->xl_mbuf = m_head;
 2537         bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
 2538         return (0);
 2539 }
 2540 
 2541 /*
 2542  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 2543  * to the mbuf data regions directly in the transmit lists. We also save a
 2544  * copy of the pointers since the transmit list fragment pointers are
 2545  * physical addresses.
 2546  */
 2547 
 2548 static void
 2549 xl_start(struct ifnet *ifp)
 2550 {
 2551         struct xl_softc         *sc = ifp->if_softc;
 2552 
 2553         XL_LOCK(sc);
 2554 
 2555         if (sc->xl_type == XL_TYPE_905B)
 2556                 xl_start_90xB_locked(ifp);
 2557         else
 2558                 xl_start_locked(ifp);
 2559 
 2560         XL_UNLOCK(sc);
 2561 }
 2562 
 2563 static void
 2564 xl_start_locked(struct ifnet *ifp)
 2565 {
 2566         struct xl_softc         *sc = ifp->if_softc;
 2567         struct mbuf             *m_head = NULL;
 2568         struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
 2569         struct xl_chain         *prev_tx;
 2570         u_int32_t               status;
 2571         int                     error;
 2572 
 2573         XL_LOCK_ASSERT(sc);
 2574 
 2575         /*
 2576          * Check for an available queue slot. If there are none,
 2577          * punt.
 2578          */
 2579         if (sc->xl_cdata.xl_tx_free == NULL) {
 2580                 xl_txeoc(sc);
 2581                 xl_txeof(sc);
 2582                 if (sc->xl_cdata.xl_tx_free == NULL) {
 2583                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2584                         return;
 2585                 }
 2586         }
 2587 
 2588         start_tx = sc->xl_cdata.xl_tx_free;
 2589 
 2590         while (sc->xl_cdata.xl_tx_free != NULL) {
 2591                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2592                 if (m_head == NULL)
 2593                         break;
 2594 
 2595                 /* Pick a descriptor off the free list. */
 2596                 prev_tx = cur_tx;
 2597                 cur_tx = sc->xl_cdata.xl_tx_free;
 2598 
 2599                 /* Pack the data into the descriptor. */
 2600                 error = xl_encap(sc, cur_tx, m_head);
 2601                 if (error) {
 2602                         cur_tx = prev_tx;
 2603                         continue;
 2604                 }
 2605 
 2606                 sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
 2607                 cur_tx->xl_next = NULL;
 2608 
 2609                 /* Chain it together. */
 2610                 if (prev != NULL) {
 2611                         prev->xl_next = cur_tx;
 2612                         prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
 2613                 }
 2614                 prev = cur_tx;
 2615 
 2616                 /*
 2617                  * If there's a BPF listener, bounce a copy of this frame
 2618                  * to him.
 2619                  */
 2620                 BPF_MTAP(ifp, cur_tx->xl_mbuf);
 2621         }
 2622 
 2623         /*
 2624          * If there are no packets queued, bail.
 2625          */
 2626         if (cur_tx == NULL)
 2627                 return;
 2628 
 2629         /*
 2630          * Place the request for the download interrupt
 2631          * in the last descriptor in the chain. This way, if
 2632          * we're chaining several packets at once, we'll only
 2633          * get an interrupt once for the whole chain rather than
 2634          * once for each packet.
 2635          */
 2636         cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
 2637             XL_TXSTAT_DL_INTR);
 2638         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2639             BUS_DMASYNC_PREWRITE);
 2640 
 2641         /*
 2642          * Queue the packets. If the TX channel is clear, update
 2643          * the downlist pointer register.
 2644          */
 2645         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
 2646         xl_wait(sc);
 2647 
 2648         if (sc->xl_cdata.xl_tx_head != NULL) {
 2649                 sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
 2650                 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
 2651                     htole32(start_tx->xl_phys);
 2652                 status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
 2653                 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
 2654                     htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
 2655                 sc->xl_cdata.xl_tx_tail = cur_tx;
 2656         } else {
 2657                 sc->xl_cdata.xl_tx_head = start_tx;
 2658                 sc->xl_cdata.xl_tx_tail = cur_tx;
 2659         }
 2660         if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
 2661                 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);
 2662 
 2663         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2664 
 2665         XL_SEL_WIN(7);
 2666 
 2667         /*
 2668          * Set a timeout in case the chip goes out to lunch.
 2669          */
 2670         sc->xl_wdog_timer = 5;
 2671 
 2672         /*
 2673          * XXX Under certain conditions, usually on slower machines
 2674          * where interrupts may be dropped, it's possible for the
 2675          * adapter to chew up all the buffers in the receive ring
 2676          * and stall, without us being able to do anything about it.
 2677          * To guard against this, we need to make a pass over the
 2678          * RX queue to make sure there aren't any packets pending.
 2679          * Doing it here means we can flush the receive ring at the
 2680          * same time the chip is DMAing the transmit descriptors we
 2681          * just gave it.
 2682          *
 2683          * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
 2684          * nature of their chips in all their marketing literature;
 2685          * we may as well take advantage of it. :)
 2686          */
 2687         taskqueue_enqueue(taskqueue_swi, &sc->xl_task);
 2688 }
 2689 
 2690 static void
 2691 xl_start_90xB_locked(struct ifnet *ifp)
 2692 {
 2693         struct xl_softc         *sc = ifp->if_softc;
 2694         struct mbuf             *m_head = NULL;
 2695         struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
 2696         struct xl_chain         *prev_tx;
 2697         int                     error, idx;
 2698 
 2699         XL_LOCK_ASSERT(sc);
 2700 
 2701         if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
 2702                 return;
 2703 
 2704         idx = sc->xl_cdata.xl_tx_prod;
 2705         start_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2706 
 2707         while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {
 2708 
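                      /*
                       * Leave a little slack: stop queueing frames when
                       * fewer than three descriptors remain free and set
                       * IFF_DRV_OACTIVE to throttle the stack.
                       */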
 2709                 if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
 2710                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2711                         break;
 2712                 }
 2713 
 2714                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2715                 if (m_head == NULL)
 2716                         break;
 2717 
 2718                 prev_tx = cur_tx;
 2719                 cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
 2720 
 2721                 /* Pack the data into the descriptor. */
 2722                 error = xl_encap(sc, cur_tx, m_head);
 2723                 if (error) {
 2724                         cur_tx = prev_tx;
 2725                         continue;
 2726                 }
 2727 
 2728                 /* Chain it together. */
 2729                 if (prev != NULL)
 2730                         prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
 2731                 prev = cur_tx;
 2732 
 2733                 /*
 2734                  * If there's a BPF listener, bounce a copy of this frame
 2735                  * to him.
 2736                  */
 2737                 BPF_MTAP(ifp, cur_tx->xl_mbuf);
 2738 
 2739                 XL_INC(idx, XL_TX_LIST_CNT);
 2740                 sc->xl_cdata.xl_tx_cnt++;
 2741         }
 2742 
 2743         /*
 2744          * If there are no packets queued, bail.
 2745          */
 2746         if (cur_tx == NULL)
 2747                 return;
 2748 
 2749         /*
  2750          * Place the request for the download interrupt
  2751          * in the last descriptor in the chain. This way, if
  2752          * we're chaining several packets at once, we'll only
  2753          * get an interrupt once for the whole chain rather than
 2754          * once for each packet.
 2755          */
 2756         cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
 2757             XL_TXSTAT_DL_INTR);
 2758         bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
 2759             BUS_DMASYNC_PREWRITE);
 2760 
 2761         /* Start transmission */
 2762         sc->xl_cdata.xl_tx_prod = idx;
 2763         start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
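               /*
                * Linking the new chain into the previous descriptor's next
                * pointer is all that is done to start transmission here:
                * on the 90xB the down-polling interval programmed in
                * xl_init_locked() presumably lets the chip pick up the new
                * descriptors on its own, so no DOWNLIST_PTR write or
                * stall/unstall sequence is needed as in xl_start_locked().
                */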
 2764 
 2765         /*
 2766          * Set a timeout in case the chip goes out to lunch.
 2767          */
 2768         sc->xl_wdog_timer = 5;
 2769 }
 2770 
 2771 static void
 2772 xl_init(void *xsc)
 2773 {
 2774         struct xl_softc         *sc = xsc;
 2775 
 2776         XL_LOCK(sc);
 2777         xl_init_locked(sc);
 2778         XL_UNLOCK(sc);
 2779 }
 2780 
 2781 static void
 2782 xl_init_locked(struct xl_softc *sc)
 2783 {
 2784         struct ifnet            *ifp = sc->xl_ifp;
 2785         int                     error, i;
 2786         u_int16_t               rxfilt = 0;
 2787         struct mii_data         *mii = NULL;
 2788 
 2789         XL_LOCK_ASSERT(sc);
 2790 
 2791         /*
 2792          * Cancel pending I/O and free all RX/TX buffers.
 2793          */
 2794         xl_stop(sc);
 2795 
 2796         if (sc->xl_miibus == NULL) {
 2797                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 2798                 xl_wait(sc);
 2799         }
 2800         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2801         xl_wait(sc);
 2802         DELAY(10000);
 2803 
 2804         if (sc->xl_miibus != NULL)
 2805                 mii = device_get_softc(sc->xl_miibus);
 2806 
 2807         /* Init our MAC address */
 2808         XL_SEL_WIN(2);
 2809         for (i = 0; i < ETHER_ADDR_LEN; i++) {
 2810                 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
 2811                                 IFP2ENADDR(sc->xl_ifp)[i]);
 2812         }
 2813 
 2814         /* Clear the station mask. */
 2815         for (i = 0; i < 3; i++)
 2816                 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
 2817 #ifdef notdef
 2818         /* Reset TX and RX. */
 2819         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 2820         xl_wait(sc);
 2821         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 2822         xl_wait(sc);
 2823 #endif
 2824         /* Init circular RX list. */
 2825         error = xl_list_rx_init(sc);
 2826         if (error) {
 2827                 device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n",
 2828                     error);
 2829                 xl_stop(sc);
 2830                 return;
 2831         }
 2832 
 2833         /* Init TX descriptors. */
 2834         if (sc->xl_type == XL_TYPE_905B)
 2835                 error = xl_list_tx_init_90xB(sc);
 2836         else
 2837                 error = xl_list_tx_init(sc);
 2838         if (error) {
 2839                 device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n",
 2840                     error);
 2841                 xl_stop(sc);
 2842                 return;
 2843         }
 2844 
 2845         /*
 2846          * Set the TX freethresh value.
 2847          * Note that this has no effect on 3c905B "cyclone"
 2848          * cards but is required for 3c900/3c905 "boomerang"
 2849          * cards in order to enable the download engine.
 2850          */
 2851         CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
 2852 
 2853         /* Set the TX start threshold for best performance. */
 2854         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
 2855 
 2856         /*
 2857          * If this is a 3c905B, also set the tx reclaim threshold.
 2858          * This helps cut down on the number of tx reclaim errors
 2859          * that could happen on a busy network. The chip multiplies
 2860          * the register value by 16 to obtain the actual threshold
 2861          * in bytes, so we divide by 16 when setting the value here.
 2862          * The existing threshold value can be examined by reading
 2863          * the register at offset 9 in window 5.
 2864          */
 2865         if (sc->xl_type == XL_TYPE_905B) {
 2866                 CSR_WRITE_2(sc, XL_COMMAND,
 2867                     XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
 2868         }
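               /*
                * For illustration only: if XL_PACKET_SIZE were 1536, the
                * value written above would be 1536 >> 4 == 96, which the
                * chip would scale back up to 96 * 16 == 1536 bytes.
                */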
 2869 
 2870         /* Set RX filter bits. */
 2871         XL_SEL_WIN(5);
 2872         rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
 2873 
 2874         /* Set the individual bit to receive frames for this host only. */
 2875         rxfilt |= XL_RXFILTER_INDIVIDUAL;
 2876 
 2877         /* If we want promiscuous mode, set the allframes bit. */
 2878         if (ifp->if_flags & IFF_PROMISC) {
 2879                 rxfilt |= XL_RXFILTER_ALLFRAMES;
 2880                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2881         } else {
 2882                 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
 2883                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2884         }
 2885 
 2886         /*
 2887          * Set capture broadcast bit to capture broadcast frames.
 2888          */
 2889         if (ifp->if_flags & IFF_BROADCAST) {
 2890                 rxfilt |= XL_RXFILTER_BROADCAST;
 2891                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2892         } else {
 2893                 rxfilt &= ~XL_RXFILTER_BROADCAST;
 2894                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
 2895         }
 2896 
 2897         /*
 2898          * Program the multicast filter, if necessary.
 2899          */
 2900         if (sc->xl_type == XL_TYPE_905B)
 2901                 xl_setmulti_hash(sc);
 2902         else
 2903                 xl_setmulti(sc);
 2904 
 2905         /*
 2906          * Load the address of the RX list. We have to
 2907          * stall the upload engine before we can manipulate
 2908          * the uplist pointer register, then unstall it when
 2909          * we're finished. We also have to wait for the
 2910          * stall command to complete before proceeding.
 2911          * Note that we have to do this after any RX resets
 2912          * have completed since the uplist register is cleared
 2913          * by a reset.
 2914          */
 2915         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
 2916         xl_wait(sc);
 2917         CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
 2918         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
 2919         xl_wait(sc);
 2920 
 2921         if (sc->xl_type == XL_TYPE_905B) {
 2922                 /* Set polling interval */
 2923                 CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
 2924                 /* Load the address of the TX list */
 2925                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
 2926                 xl_wait(sc);
 2927                 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
 2928                     sc->xl_cdata.xl_tx_chain[0].xl_phys);
 2929                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
 2930                 xl_wait(sc);
 2931         }
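               /*
                * Only the 905B gets its downlist pointer preloaded here,
                * together with the polling interval; on the older (non-B)
                * chips the pointer is instead loaded on demand from
                * xl_start_locked() whenever the register reads back as zero.
                */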
 2932 
 2933         /*
 2934          * If the coax transceiver is on, make sure to enable
 2935          * the DC-DC converter.
 2936          */
 2937         XL_SEL_WIN(3);
 2938         if (sc->xl_xcvr == XL_XCVR_COAX)
 2939                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
 2940         else
 2941                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
 2942 
 2943         /*
  2944          * Increase the packet size to allow reception of 802.1Q or ISL packets.
 2945          * For the 3c90x chip, set the 'allow large packets' bit in the MAC
 2946          * control register. For 3c90xB/C chips, use the RX packet size
 2947          * register.
 2948          */
 2949 
 2950         if (sc->xl_type == XL_TYPE_905B)
 2951                 CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
 2952         else {
 2953                 u_int8_t macctl;
 2954                 macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
 2955                 macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
 2956                 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
 2957         }
 2958 
 2959         /* Clear out the stats counters. */
 2960         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
 2961         sc->xl_stats_no_timeout = 1;
 2962         xl_stats_update_locked(sc);
 2963         sc->xl_stats_no_timeout = 0;
 2964         XL_SEL_WIN(4);
 2965         CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
 2966         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
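               /*
                * The full update pass above presumably zeroes the counters
                * as a side effect of reading them (the statistics registers
                * clear on read), after which collection is switched back on.
                */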
 2967 
 2968         /*
 2969          * Enable interrupts.
 2970          */
 2971         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
 2972         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
 2973 #ifdef DEVICE_POLLING
 2974         /* Disable interrupts if we are polling. */
 2975         if (ifp->if_capenable & IFCAP_POLLING)
 2976                 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 2977         else
 2978 #endif
 2979         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
 2980         if (sc->xl_flags & XL_FLAG_FUNCREG)
 2981             bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
 2982 
 2983         /* Set the RX early threshold */
 2984         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
 2985         CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
 2986 
 2987         /* Enable receiver and transmitter. */
 2988         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
 2989         xl_wait(sc);
 2990         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
 2991         xl_wait(sc);
 2992 
 2993         /* XXX Downcall to miibus. */
 2994         if (mii != NULL)
 2995                 mii_mediachg(mii);
 2996 
 2997         /* Select window 7 for normal operations. */
 2998         XL_SEL_WIN(7);
 2999 
 3000         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 3001         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3002 
 3003         sc->xl_wdog_timer = 0;
 3004         callout_reset(&sc->xl_stat_callout, hz, xl_stats_update, sc);
 3005 }
 3006 
 3007 /*
 3008  * Set media options.
 3009  */
 3010 static int
 3011 xl_ifmedia_upd(struct ifnet *ifp)
 3012 {
 3013         struct xl_softc         *sc = ifp->if_softc;
 3014         struct ifmedia          *ifm = NULL;
 3015         struct mii_data         *mii = NULL;
 3016 
 3017         XL_LOCK(sc);
 3018 
 3019         if (sc->xl_miibus != NULL)
 3020                 mii = device_get_softc(sc->xl_miibus);
 3021         if (mii == NULL)
 3022                 ifm = &sc->ifmedia;
 3023         else
 3024                 ifm = &mii->mii_media;
 3025 
 3026         switch (IFM_SUBTYPE(ifm->ifm_media)) {
 3027         case IFM_100_FX:
 3028         case IFM_10_FL:
 3029         case IFM_10_2:
 3030         case IFM_10_5:
  3031                 xl_setmode(sc, ifm->ifm_media);
  3032                 XL_UNLOCK(sc);
  3033                 return (0);
 3034         default:
 3035                 break;
 3036         }
 3037 
 3038         if (sc->xl_media & XL_MEDIAOPT_MII ||
 3039             sc->xl_media & XL_MEDIAOPT_BTX ||
 3040             sc->xl_media & XL_MEDIAOPT_BT4) {
 3041                 xl_init_locked(sc);
 3042         } else {
 3043                 xl_setmode(sc, ifm->ifm_media);
 3044         }
 3045 
 3046         XL_UNLOCK(sc);
 3047 
 3048         return (0);
 3049 }
 3050 
 3051 /*
 3052  * Report current media status.
 3053  */
 3054 static void
 3055 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 3056 {
 3057         struct xl_softc         *sc = ifp->if_softc;
 3058         u_int32_t               icfg;
 3059         u_int16_t               status = 0;
 3060         struct mii_data         *mii = NULL;
 3061 
 3062         XL_LOCK(sc);
 3063 
 3064         if (sc->xl_miibus != NULL)
 3065                 mii = device_get_softc(sc->xl_miibus);
 3066 
 3067         XL_SEL_WIN(4);
 3068         status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
 3069 
 3070         XL_SEL_WIN(3);
 3071         icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
 3072         icfg >>= XL_ICFG_CONNECTOR_BITS;
 3073 
 3074         ifmr->ifm_active = IFM_ETHER;
 3075         ifmr->ifm_status = IFM_AVALID;
 3076 
 3077         if ((status & XL_MEDIASTAT_CARRIER) == 0)
 3078                 ifmr->ifm_status |= IFM_ACTIVE;
 3079 
 3080         switch (icfg) {
 3081         case XL_XCVR_10BT:
 3082                 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
 3083                 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
 3084                         ifmr->ifm_active |= IFM_FDX;
 3085                 else
 3086                         ifmr->ifm_active |= IFM_HDX;
 3087                 break;
 3088         case XL_XCVR_AUI:
 3089                 if (sc->xl_type == XL_TYPE_905B &&
 3090                     sc->xl_media == XL_MEDIAOPT_10FL) {
 3091                         ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
 3092                         if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
 3093                                 ifmr->ifm_active |= IFM_FDX;
 3094                         else
 3095                                 ifmr->ifm_active |= IFM_HDX;
 3096                 } else
 3097                         ifmr->ifm_active = IFM_ETHER|IFM_10_5;
 3098                 break;
 3099         case XL_XCVR_COAX:
 3100                 ifmr->ifm_active = IFM_ETHER|IFM_10_2;
 3101                 break;
 3102         /*
 3103          * XXX MII and BTX/AUTO should be separate cases.
 3104          */
 3105 
 3106         case XL_XCVR_100BTX:
 3107         case XL_XCVR_AUTO:
 3108         case XL_XCVR_MII:
 3109                 if (mii != NULL) {
 3110                         mii_pollstat(mii);
 3111                         ifmr->ifm_active = mii->mii_media_active;
 3112                         ifmr->ifm_status = mii->mii_media_status;
 3113                 }
 3114                 break;
 3115         case XL_XCVR_100BFX:
 3116                 ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
 3117                 break;
 3118         default:
 3119                 if_printf(ifp, "unknown XCVR type: %d\n", icfg);
 3120                 break;
 3121         }
 3122 
 3123         XL_UNLOCK(sc);
 3124 }
 3125 
 3126 static int
 3127 xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 3128 {
 3129         struct xl_softc         *sc = ifp->if_softc;
 3130         struct ifreq            *ifr = (struct ifreq *) data;
 3131         int                     error = 0;
 3132         struct mii_data         *mii = NULL;
 3133         u_int8_t                rxfilt;
 3134 
 3135         switch (command) {
 3136         case SIOCSIFFLAGS:
 3137                 XL_LOCK(sc);
 3138 
 3139                 XL_SEL_WIN(5);
 3140                 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
 3141                 if (ifp->if_flags & IFF_UP) {
 3142                         if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3143                             ifp->if_flags & IFF_PROMISC &&
 3144                             !(sc->xl_if_flags & IFF_PROMISC)) {
 3145                                 rxfilt |= XL_RXFILTER_ALLFRAMES;
 3146                                 CSR_WRITE_2(sc, XL_COMMAND,
 3147                                     XL_CMD_RX_SET_FILT|rxfilt);
 3148                                 XL_SEL_WIN(7);
 3149                         } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
 3150                             !(ifp->if_flags & IFF_PROMISC) &&
 3151                             sc->xl_if_flags & IFF_PROMISC) {
 3152                                 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
 3153                                 CSR_WRITE_2(sc, XL_COMMAND,
 3154                                     XL_CMD_RX_SET_FILT|rxfilt);
 3155                                 XL_SEL_WIN(7);
 3156                         } else {
 3157                                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 3158                                         xl_init_locked(sc);
 3159                         }
 3160                 } else {
 3161                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 3162                                 xl_stop(sc);
 3163                 }
 3164                 sc->xl_if_flags = ifp->if_flags;
 3165                 XL_UNLOCK(sc);
 3166                 error = 0;
 3167                 break;
 3168         case SIOCADDMULTI:
 3169         case SIOCDELMULTI:
 3170                 /* XXX Downcall from if_addmulti() possibly with locks held. */
 3171                 XL_LOCK(sc);
 3172                 if (sc->xl_type == XL_TYPE_905B)
 3173                         xl_setmulti_hash(sc);
 3174                 else
 3175                         xl_setmulti(sc);
 3176                 XL_UNLOCK(sc);
 3177                 error = 0;
 3178                 break;
 3179         case SIOCGIFMEDIA:
 3180         case SIOCSIFMEDIA:
 3181                 if (sc->xl_miibus != NULL)
 3182                         mii = device_get_softc(sc->xl_miibus);
 3183                 if (mii == NULL)
 3184                         error = ifmedia_ioctl(ifp, ifr,
 3185                             &sc->ifmedia, command);
 3186                 else
 3187                         error = ifmedia_ioctl(ifp, ifr,
 3188                             &mii->mii_media, command);
 3189                 break;
 3190         case SIOCSIFCAP:
 3191 #ifdef DEVICE_POLLING
 3192                 if (ifr->ifr_reqcap & IFCAP_POLLING &&
 3193                     !(ifp->if_capenable & IFCAP_POLLING)) {
 3194                         error = ether_poll_register(xl_poll, ifp);
 3195                         if (error)
 3196                                 return(error);
 3197                         XL_LOCK(sc);
 3198                         /* Disable interrupts */
 3199                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 3200                         ifp->if_capenable |= IFCAP_POLLING;
 3201                         XL_UNLOCK(sc);
 3202                         return (error);
 3203                         
 3204                 }
 3205                 if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
 3206                     ifp->if_capenable & IFCAP_POLLING) {
 3207                         error = ether_poll_deregister(ifp);
 3208                         /* Enable interrupts. */
 3209                         XL_LOCK(sc);
 3210                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
 3211                         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
 3212                         if (sc->xl_flags & XL_FLAG_FUNCREG)
 3213                                 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle,
 3214                                     4, 0x8000);
 3215                         ifp->if_capenable &= ~IFCAP_POLLING;
 3216                         XL_UNLOCK(sc);
 3217                         return (error);
 3218                 }
 3219 #endif /* DEVICE_POLLING */
 3220                 XL_LOCK(sc);
 3221                 ifp->if_capenable = ifr->ifr_reqcap;
 3222                 if (ifp->if_capenable & IFCAP_TXCSUM)
 3223                         ifp->if_hwassist = XL905B_CSUM_FEATURES;
 3224                 else
 3225                         ifp->if_hwassist = 0;
 3226                 XL_UNLOCK(sc);
 3227                 break;
 3228         default:
 3229                 error = ether_ioctl(ifp, command, data);
 3230                 break;
 3231         }
 3232 
 3233         return (error);
 3234 }
 3235 
 3236 static int
 3237 xl_watchdog(struct xl_softc *sc)
 3238 {
 3239         struct ifnet            *ifp = sc->xl_ifp;
 3240         u_int16_t               status = 0;
 3241 
 3242         XL_LOCK_ASSERT(sc);
 3243 
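               /*
                * The watchdog counter is armed to 5 by the start routines and
                * cleared in xl_init_locked()/xl_stop().  This check therefore
                * reports a timeout only when an armed counter, decremented on
                * each periodic call, finally reaches zero.
                */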
 3244         if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0)
 3245                 return (0);
 3246 
 3247         ifp->if_oerrors++;
 3248         XL_SEL_WIN(4);
 3249         status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
 3250         device_printf(sc->xl_dev, "watchdog timeout\n");
 3251 
 3252         if (status & XL_MEDIASTAT_CARRIER)
 3253                 device_printf(sc->xl_dev,
 3254                     "no carrier - transceiver cable problem?\n");
 3255 
 3256         xl_txeoc(sc);
 3257         xl_txeof(sc);
 3258         xl_rxeof(sc);
 3259         xl_reset(sc);
 3260         xl_init_locked(sc);
 3261 
 3262         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
 3263                 if (sc->xl_type == XL_TYPE_905B)
 3264                         xl_start_90xB_locked(ifp);
 3265                 else
 3266                         xl_start_locked(ifp);
 3267         }
 3268 
 3269         return (EJUSTRETURN);
 3270 }
 3271 
 3272 /*
 3273  * Stop the adapter and free any mbufs allocated to the
 3274  * RX and TX lists.
 3275  */
 3276 static void
 3277 xl_stop(struct xl_softc *sc)
 3278 {
 3279         register int            i;
 3280         struct ifnet            *ifp = sc->xl_ifp;
 3281 
 3282         XL_LOCK_ASSERT(sc);
 3283 
 3284         sc->xl_wdog_timer = 0;
 3285 
 3286         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
 3287         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
 3288         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
 3289         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
 3290         xl_wait(sc);
 3291         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
 3292         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
 3293         DELAY(800);
 3294 
 3295 #ifdef foo
 3296         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
 3297         xl_wait(sc);
 3298         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
 3299         xl_wait(sc);
 3300 #endif
 3301 
 3302         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
 3303         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
 3304         CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
 3305         if (sc->xl_flags & XL_FLAG_FUNCREG)
 3306                 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
 3307 
 3308         /* Stop the stats updater. */
 3309         callout_stop(&sc->xl_stat_callout);
 3310 
 3311         /*
 3312          * Free data in the RX lists.
 3313          */
 3314         for (i = 0; i < XL_RX_LIST_CNT; i++) {
 3315                 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
 3316                         bus_dmamap_unload(sc->xl_mtag,
 3317                             sc->xl_cdata.xl_rx_chain[i].xl_map);
 3318                         bus_dmamap_destroy(sc->xl_mtag,
 3319                             sc->xl_cdata.xl_rx_chain[i].xl_map);
 3320                         m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
 3321                         sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
 3322                 }
 3323         }
 3324         if (sc->xl_ldata.xl_rx_list != NULL)
 3325                 bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
 3326         /*
 3327          * Free the TX list buffers.
 3328          */
 3329         for (i = 0; i < XL_TX_LIST_CNT; i++) {
 3330                 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
 3331                         bus_dmamap_unload(sc->xl_mtag,
 3332                             sc->xl_cdata.xl_tx_chain[i].xl_map);
 3333                         bus_dmamap_destroy(sc->xl_mtag,
 3334                             sc->xl_cdata.xl_tx_chain[i].xl_map);
 3335                         m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
 3336                         sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
 3337                 }
 3338         }
 3339         if (sc->xl_ldata.xl_tx_list != NULL)
 3340                 bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);
 3341 
 3342         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 3343 }
 3344 
 3345 /*
 3346  * Stop all chip I/O so that the kernel's probe routines don't
 3347  * get confused by errant DMAs when rebooting.
 3348  */
 3349 static void
 3350 xl_shutdown(device_t dev)
 3351 {
 3352         struct xl_softc         *sc;
 3353 
 3354         sc = device_get_softc(dev);
 3355 
 3356         XL_LOCK(sc);
 3357         xl_reset(sc);
 3358         xl_stop(sc);
 3359         XL_UNLOCK(sc);
 3360 }
 3361 
 3362 static int
 3363 xl_suspend(device_t dev)
 3364 {
 3365         struct xl_softc         *sc;
 3366 
 3367         sc = device_get_softc(dev);
 3368 
 3369         XL_LOCK(sc);
 3370         xl_stop(sc);
 3371         XL_UNLOCK(sc);
 3372 
 3373         return (0);
 3374 }
 3375 
 3376 static int
 3377 xl_resume(device_t dev)
 3378 {
 3379         struct xl_softc         *sc;
 3380         struct ifnet            *ifp;
 3381 
 3382         sc = device_get_softc(dev);
 3383         ifp = sc->xl_ifp;
 3384 
 3385         XL_LOCK(sc);
 3386 
 3387         xl_reset(sc);
 3388         if (ifp->if_flags & IFF_UP)
 3389                 xl_init_locked(sc);
 3390 
 3391         XL_UNLOCK(sc);
 3392 
 3393         return (0);
 3394 }
