The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/sk/if_sk.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
    2 
    3 /*-
    4  * Copyright (c) 1997, 1998, 1999, 2000
    5  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 3. All advertising materials mentioning features or use of this software
   16  *    must display the following acknowledgement:
   17  *      This product includes software developed by Bill Paul.
   18  * 4. Neither the name of the author nor the names of any co-contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   32  * THE POSSIBILITY OF SUCH DAMAGE.
   33  */
   34 /*-
   35  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
   36  *
   37  * Permission to use, copy, modify, and distribute this software for any
   38  * purpose with or without fee is hereby granted, provided that the above
   39  * copyright notice and this permission notice appear in all copies.
   40  *
   41  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   42  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   43  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   44  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   45  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   46  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   47  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
   48  */
   49 
   50 #include <sys/cdefs.h>
   51 __FBSDID("$FreeBSD: releng/8.4/sys/dev/sk/if_sk.c 230714 2012-01-29 01:22:48Z marius $");
   52 
   53 /*
   54  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
   55  * the SK-984x series adapters, both single port and dual port.
   56  * References:
   57  *      The XaQti XMAC II datasheet,
   58  *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
   59  *      The SysKonnect GEnesis manual, http://www.syskonnect.com
   60  *
   61  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
   62  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
   63  * convenience to others until Vitesse corrects this problem:
   64  *
   65  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
   66  *
   67  * Written by Bill Paul <wpaul@ee.columbia.edu>
   68  * Department of Electrical Engineering
   69  * Columbia University, New York City
   70  */
   71 /*
   72  * The SysKonnect gigabit ethernet adapters consist of two main
   73  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
   74  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
   75  * components and a PHY while the GEnesis controller provides a PCI
   76  * interface with DMA support. Each card may have between 512K and
   77  * 2MB of SRAM on board depending on the configuration.
   78  *
   79  * The SysKonnect GEnesis controller can have either one or two XMAC
   80  * chips connected to it, allowing single or dual port NIC configurations.
   81  * SysKonnect has the distinction of being the only vendor on the market
   82  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
   83  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
   84  * XMAC registers. This driver takes advantage of these features to allow
   85  * both XMACs to operate as independent interfaces.
   86  */
   87 
   88 #include <sys/param.h>
   89 #include <sys/systm.h>
   90 #include <sys/bus.h>
   91 #include <sys/endian.h>
   92 #include <sys/mbuf.h>
   93 #include <sys/malloc.h>
   94 #include <sys/kernel.h>
   95 #include <sys/module.h>
   96 #include <sys/socket.h>
   97 #include <sys/sockio.h>
   98 #include <sys/queue.h>
   99 #include <sys/sysctl.h>
  100 
  101 #include <net/bpf.h>
  102 #include <net/ethernet.h>
  103 #include <net/if.h>
  104 #include <net/if_arp.h>
  105 #include <net/if_dl.h>
  106 #include <net/if_media.h>
  107 #include <net/if_types.h>
  108 #include <net/if_vlan_var.h>
  109 
  110 #include <netinet/in.h>
  111 #include <netinet/in_systm.h>
  112 #include <netinet/ip.h>
  113 
  114 #include <machine/bus.h>
  115 #include <machine/in_cksum.h>
  116 #include <machine/resource.h>
  117 #include <sys/rman.h>
  118 
  119 #include <dev/mii/mii.h>
  120 #include <dev/mii/miivar.h>
  121 #include <dev/mii/brgphyreg.h>
  122 
  123 #include <dev/pci/pcireg.h>
  124 #include <dev/pci/pcivar.h>
  125 
  126 #if 0
  127 #define SK_USEIOSPACE
  128 #endif
  129 
  130 #include <dev/sk/if_skreg.h>
  131 #include <dev/sk/xmaciireg.h>
  132 #include <dev/sk/yukonreg.h>
  133 
  134 MODULE_DEPEND(sk, pci, 1, 1, 1);
  135 MODULE_DEPEND(sk, ether, 1, 1, 1);
  136 MODULE_DEPEND(sk, miibus, 1, 1, 1);
  137 
  138 /* "device miibus" required.  See GENERIC if you get errors here. */
  139 #include "miibus_if.h"
  140 
  141 #ifndef lint
  142 static const char rcsid[] =
  143   "$FreeBSD: releng/8.4/sys/dev/sk/if_sk.c 230714 2012-01-29 01:22:48Z marius $";
  144 #endif
  145 
/*
 * PCI vendor/device ID table of adapters this driver attaches to,
 * paired with the description printed at probe time.  The list is
 * terminated by an all-zero sentinel entry.
 */
static struct sk_type sk_devs[] = {
        {
                VENDORID_SK,
                DEVICEID_SK_V1,
                "SysKonnect Gigabit Ethernet (V1.0)"
        },
        {
                VENDORID_SK,
                DEVICEID_SK_V2,
                "SysKonnect Gigabit Ethernet (V2.0)"
        },
        {
                VENDORID_MARVELL,
                DEVICEID_SK_V2,
                "Marvell Gigabit Ethernet"
        },
        {
                VENDORID_MARVELL,
                DEVICEID_BELKIN_5005,
                "Belkin F5D5005 Gigabit Ethernet"
        },
        {
                VENDORID_3COM,
                DEVICEID_3COM_3C940,
                "3Com 3C940 Gigabit Ethernet"
        },
        {
                VENDORID_LINKSYS,
                DEVICEID_LINKSYS_EG1032,
                "Linksys EG1032 Gigabit Ethernet"
        },
        {
                VENDORID_DLINK,
                DEVICEID_DLINK_DGE530T_A1,
                "D-Link DGE-530T Gigabit Ethernet"
        },
        {
                VENDORID_DLINK,
                DEVICEID_DLINK_DGE530T_B1,
                "D-Link DGE-530T Gigabit Ethernet"
        },
        { 0, 0, NULL }
};
  189 
  190 static int skc_probe(device_t);
  191 static int skc_attach(device_t);
  192 static int skc_detach(device_t);
  193 static int skc_shutdown(device_t);
  194 static int skc_suspend(device_t);
  195 static int skc_resume(device_t);
  196 static int sk_detach(device_t);
  197 static int sk_probe(device_t);
  198 static int sk_attach(device_t);
  199 static void sk_tick(void *);
  200 static void sk_yukon_tick(void *);
  201 static void sk_intr(void *);
  202 static void sk_intr_xmac(struct sk_if_softc *);
  203 static void sk_intr_bcom(struct sk_if_softc *);
  204 static void sk_intr_yukon(struct sk_if_softc *);
  205 static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
  206 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
  207 static void sk_rxeof(struct sk_if_softc *);
  208 static void sk_jumbo_rxeof(struct sk_if_softc *);
  209 static void sk_txeof(struct sk_if_softc *);
  210 static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
  211 static int sk_encap(struct sk_if_softc *, struct mbuf **);
  212 static void sk_start(struct ifnet *);
  213 static void sk_start_locked(struct ifnet *);
  214 static int sk_ioctl(struct ifnet *, u_long, caddr_t);
  215 static void sk_init(void *);
  216 static void sk_init_locked(struct sk_if_softc *);
  217 static void sk_init_xmac(struct sk_if_softc *);
  218 static void sk_init_yukon(struct sk_if_softc *);
  219 static void sk_stop(struct sk_if_softc *);
  220 static void sk_watchdog(void *);
  221 static int sk_ifmedia_upd(struct ifnet *);
  222 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  223 static void sk_reset(struct sk_softc *);
  224 static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
  225 static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
  226 static int sk_newbuf(struct sk_if_softc *, int);
  227 static int sk_jumbo_newbuf(struct sk_if_softc *, int);
  228 static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  229 static int sk_dma_alloc(struct sk_if_softc *);
  230 static int sk_dma_jumbo_alloc(struct sk_if_softc *);
  231 static void sk_dma_free(struct sk_if_softc *);
  232 static void sk_dma_jumbo_free(struct sk_if_softc *);
  233 static int sk_init_rx_ring(struct sk_if_softc *);
  234 static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
  235 static void sk_init_tx_ring(struct sk_if_softc *);
  236 static u_int32_t sk_win_read_4(struct sk_softc *, int);
  237 static u_int16_t sk_win_read_2(struct sk_softc *, int);
  238 static u_int8_t sk_win_read_1(struct sk_softc *, int);
  239 static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
  240 static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
  241 static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
  242 
  243 static int sk_miibus_readreg(device_t, int, int);
  244 static int sk_miibus_writereg(device_t, int, int, int);
  245 static void sk_miibus_statchg(device_t);
  246 
  247 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
  248 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
  249                                                 int);
  250 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
  251 
  252 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
  253 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
  254                                                 int);
  255 static void sk_marv_miibus_statchg(struct sk_if_softc *);
  256 
  257 static uint32_t sk_xmchash(const uint8_t *);
  258 static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
  259 static void sk_rxfilter(struct sk_if_softc *);
  260 static void sk_rxfilter_genesis(struct sk_if_softc *);
  261 static void sk_rxfilter_yukon(struct sk_if_softc *);
  262 
  263 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
  264 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
  265 
/* Tunables. */
/*
 * hw.skc.jumbo_disable: loader tunable; non-zero presumably disables
 * jumbo frame support (consumed elsewhere in this file — verify against
 * sk_dma_jumbo_alloc callers).
 */
static int jumbo_disable = 0;
TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
  269  
/*
 * It seems that SK-NET GENESIS supports very simple checksum offload
 * capability for Tx and I believe it can generate a 0 checksum value for
 * UDP packets in Tx as the hardware can't differentiate UDP packets from
 * TCP packets. A 0 checksum value for a UDP packet is invalid, as it
 * means the sender didn't perform checksum computation. For safety I
 * disabled UDP checksum offload capability at the moment. Alternatively
 * we can introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
 * offload routine.
 */
  280 #define SK_CSUM_FEATURES        (CSUM_TCP)
  281 
  282 /*
  283  * Note that we have newbus methods for both the GEnesis controller
  284  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
  285  * the miibus code is a child of the XMACs. We need to do it this way
  286  * so that the miibus drivers can access the PHY registers on the
  287  * right PHY. It's not quite what I had in mind, but it's the only
  288  * design that achieves the desired effect.
  289  */
/* Newbus method table for the GEnesis controller ("skc") parent device. */
static device_method_t skc_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         skc_probe),
        DEVMETHOD(device_attach,        skc_attach),
        DEVMETHOD(device_detach,        skc_detach),
        DEVMETHOD(device_suspend,       skc_suspend),
        DEVMETHOD(device_resume,        skc_resume),
        DEVMETHOD(device_shutdown,      skc_shutdown),

        DEVMETHOD_END
};

/* Controller driver; per-device softc is struct sk_softc. */
static driver_t skc_driver = {
        "skc",
        skc_methods,
        sizeof(struct sk_softc)
};

static devclass_t skc_devclass;
  309 
/* Newbus method table for the per-port XMAC child devices ("sk"). */
static device_method_t sk_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         sk_probe),
        DEVMETHOD(device_attach,        sk_attach),
        DEVMETHOD(device_detach,        sk_detach),
        DEVMETHOD(device_shutdown,      bus_generic_shutdown),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       sk_miibus_readreg),
        DEVMETHOD(miibus_writereg,      sk_miibus_writereg),
        DEVMETHOD(miibus_statchg,       sk_miibus_statchg),

        DEVMETHOD_END
};

/* Per-port driver; per-device softc is struct sk_if_softc. */
static driver_t sk_driver = {
        "sk",
        sk_methods,
        sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

/* Attach hierarchy: pci -> skc (controller) -> sk (port) -> miibus (PHY). */
DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
  336 
/*
 * Bus resource specs: either BAR1 I/O-port space or BAR0 memory space,
 * plus one shareable interrupt in both cases.
 */
static struct resource_spec sk_res_spec_io[] = {
        { SYS_RES_IOPORT,       PCIR_BAR(1),    RF_ACTIVE },
        { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
        { -1,                   0,              0 }
};

static struct resource_spec sk_res_spec_mem[] = {
        { SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
        { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
        { -1,                   0,              0 }
};
  348 
/* Read-modify-write helpers for direct CSR and windowed register access. */
#define SK_SETBIT(sc, reg, x)           \
        CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)           \
        CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)     \
        sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)     \
        sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)     \
        sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)     \
        sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
  366 
  367 static u_int32_t
  368 sk_win_read_4(sc, reg)
  369         struct sk_softc         *sc;
  370         int                     reg;
  371 {
  372 #ifdef SK_USEIOSPACE
  373         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  374         return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
  375 #else
  376         return(CSR_READ_4(sc, reg));
  377 #endif
  378 }
  379 
  380 static u_int16_t
  381 sk_win_read_2(sc, reg)
  382         struct sk_softc         *sc;
  383         int                     reg;
  384 {
  385 #ifdef SK_USEIOSPACE
  386         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  387         return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
  388 #else
  389         return(CSR_READ_2(sc, reg));
  390 #endif
  391 }
  392 
  393 static u_int8_t
  394 sk_win_read_1(sc, reg)
  395         struct sk_softc         *sc;
  396         int                     reg;
  397 {
  398 #ifdef SK_USEIOSPACE
  399         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  400         return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
  401 #else
  402         return(CSR_READ_1(sc, reg));
  403 #endif
  404 }
  405 
  406 static void
  407 sk_win_write_4(sc, reg, val)
  408         struct sk_softc         *sc;
  409         int                     reg;
  410         u_int32_t               val;
  411 {
  412 #ifdef SK_USEIOSPACE
  413         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  414         CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
  415 #else
  416         CSR_WRITE_4(sc, reg, val);
  417 #endif
  418         return;
  419 }
  420 
  421 static void
  422 sk_win_write_2(sc, reg, val)
  423         struct sk_softc         *sc;
  424         int                     reg;
  425         u_int32_t               val;
  426 {
  427 #ifdef SK_USEIOSPACE
  428         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  429         CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
  430 #else
  431         CSR_WRITE_2(sc, reg, val);
  432 #endif
  433         return;
  434 }
  435 
  436 static void
  437 sk_win_write_1(sc, reg, val)
  438         struct sk_softc         *sc;
  439         int                     reg;
  440         u_int32_t               val;
  441 {
  442 #ifdef SK_USEIOSPACE
  443         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  444         CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
  445 #else
  446         CSR_WRITE_1(sc, reg, val);
  447 #endif
  448         return;
  449 }
  450 
  451 static int
  452 sk_miibus_readreg(dev, phy, reg)
  453         device_t                dev;
  454         int                     phy, reg;
  455 {
  456         struct sk_if_softc      *sc_if;
  457         int                     v;
  458 
  459         sc_if = device_get_softc(dev);
  460 
  461         SK_IF_MII_LOCK(sc_if);
  462         switch(sc_if->sk_softc->sk_type) {
  463         case SK_GENESIS:
  464                 v = sk_xmac_miibus_readreg(sc_if, phy, reg);
  465                 break;
  466         case SK_YUKON:
  467         case SK_YUKON_LITE:
  468         case SK_YUKON_LP:
  469                 v = sk_marv_miibus_readreg(sc_if, phy, reg);
  470                 break;
  471         default:
  472                 v = 0;
  473                 break;
  474         }
  475         SK_IF_MII_UNLOCK(sc_if);
  476 
  477         return (v);
  478 }
  479 
  480 static int
  481 sk_miibus_writereg(dev, phy, reg, val)
  482         device_t                dev;
  483         int                     phy, reg, val;
  484 {
  485         struct sk_if_softc      *sc_if;
  486         int                     v;
  487 
  488         sc_if = device_get_softc(dev);
  489 
  490         SK_IF_MII_LOCK(sc_if);
  491         switch(sc_if->sk_softc->sk_type) {
  492         case SK_GENESIS:
  493                 v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
  494                 break;
  495         case SK_YUKON:
  496         case SK_YUKON_LITE:
  497         case SK_YUKON_LP:
  498                 v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
  499                 break;
  500         default:
  501                 v = 0;
  502                 break;
  503         }
  504         SK_IF_MII_UNLOCK(sc_if);
  505 
  506         return (v);
  507 }
  508 
  509 static void
  510 sk_miibus_statchg(dev)
  511         device_t                dev;
  512 {
  513         struct sk_if_softc      *sc_if;
  514 
  515         sc_if = device_get_softc(dev);
  516 
  517         SK_IF_MII_LOCK(sc_if);
  518         switch(sc_if->sk_softc->sk_type) {
  519         case SK_GENESIS:
  520                 sk_xmac_miibus_statchg(sc_if);
  521                 break;
  522         case SK_YUKON:
  523         case SK_YUKON_LITE:
  524         case SK_YUKON_LP:
  525                 sk_marv_miibus_statchg(sc_if);
  526                 break;
  527         }
  528         SK_IF_MII_UNLOCK(sc_if);
  529 
  530         return;
  531 }
  532 
  533 static int
  534 sk_xmac_miibus_readreg(sc_if, phy, reg)
  535         struct sk_if_softc      *sc_if;
  536         int                     phy, reg;
  537 {
  538         int                     i;
  539 
  540         SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
  541         SK_XM_READ_2(sc_if, XM_PHY_DATA);
  542         if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
  543                 for (i = 0; i < SK_TIMEOUT; i++) {
  544                         DELAY(1);
  545                         if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
  546                             XM_MMUCMD_PHYDATARDY)
  547                                 break;
  548                 }
  549 
  550                 if (i == SK_TIMEOUT) {
  551                         if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
  552                         return(0);
  553                 }
  554         }
  555         DELAY(1);
  556         i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
  557 
  558         return(i);
  559 }
  560 
  561 static int
  562 sk_xmac_miibus_writereg(sc_if, phy, reg, val)
  563         struct sk_if_softc      *sc_if;
  564         int                     phy, reg, val;
  565 {
  566         int                     i;
  567 
  568         SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
  569         for (i = 0; i < SK_TIMEOUT; i++) {
  570                 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
  571                         break;
  572         }
  573 
  574         if (i == SK_TIMEOUT) {
  575                 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
  576                 return (ETIMEDOUT);
  577         }
  578 
  579         SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
  580         for (i = 0; i < SK_TIMEOUT; i++) {
  581                 DELAY(1);
  582                 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
  583                         break;
  584         }
  585         if (i == SK_TIMEOUT)
  586                 if_printf(sc_if->sk_ifp, "phy write timed out\n");
  587 
  588         return(0);
  589 }
  590 
  591 static void
  592 sk_xmac_miibus_statchg(sc_if)
  593         struct sk_if_softc      *sc_if;
  594 {
  595         struct mii_data         *mii;
  596 
  597         mii = device_get_softc(sc_if->sk_miibus);
  598 
  599         /*
  600          * If this is a GMII PHY, manually set the XMAC's
  601          * duplex mode accordingly.
  602          */
  603         if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
  604                 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
  605                         SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
  606                 } else {
  607                         SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
  608                 }
  609         }
  610 }
  611 
  612 static int
  613 sk_marv_miibus_readreg(sc_if, phy, reg)
  614         struct sk_if_softc      *sc_if;
  615         int                     phy, reg;
  616 {
  617         u_int16_t               val;
  618         int                     i;
  619 
  620         if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
  621             sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) {
  622                 return(0);
  623         }
  624 
  625         SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
  626                       YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
  627 
  628         for (i = 0; i < SK_TIMEOUT; i++) {
  629                 DELAY(1);
  630                 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
  631                 if (val & YU_SMICR_READ_VALID)
  632                         break;
  633         }
  634 
  635         if (i == SK_TIMEOUT) {
  636                 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
  637                 return(0);
  638         }
  639 
  640         val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
  641 
  642         return(val);
  643 }
  644 
  645 static int
  646 sk_marv_miibus_writereg(sc_if, phy, reg, val)
  647         struct sk_if_softc      *sc_if;
  648         int                     phy, reg, val;
  649 {
  650         int                     i;
  651 
  652         SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
  653         SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
  654                       YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
  655 
  656         for (i = 0; i < SK_TIMEOUT; i++) {
  657                 DELAY(1);
  658                 if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
  659                         break;
  660         }
  661         if (i == SK_TIMEOUT)
  662                 if_printf(sc_if->sk_ifp, "phy write timeout\n");
  663 
  664         return(0);
  665 }
  666 
  667 static void
  668 sk_marv_miibus_statchg(sc_if)
  669         struct sk_if_softc      *sc_if;
  670 {
  671         return;
  672 }
  673 
  674 #define HASH_BITS               6
  675 
  676 static u_int32_t
  677 sk_xmchash(addr)
  678         const uint8_t *addr;
  679 {
  680         uint32_t crc;
  681 
  682         /* Compute CRC for the address value. */
  683         crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
  684 
  685         return (~crc & ((1 << HASH_BITS) - 1));
  686 }
  687 
  688 static void
  689 sk_setfilt(sc_if, addr, slot)
  690         struct sk_if_softc      *sc_if;
  691         u_int16_t               *addr;
  692         int                     slot;
  693 {
  694         int                     base;
  695 
  696         base = XM_RXFILT_ENTRY(slot);
  697 
  698         SK_XM_WRITE_2(sc_if, base, addr[0]);
  699         SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
  700         SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
  701 
  702         return;
  703 }
  704 
  705 static void
  706 sk_rxfilter(sc_if)
  707         struct sk_if_softc      *sc_if;
  708 {
  709         struct sk_softc         *sc;
  710 
  711         SK_IF_LOCK_ASSERT(sc_if);
  712 
  713         sc = sc_if->sk_softc;
  714         if (sc->sk_type == SK_GENESIS)
  715                 sk_rxfilter_genesis(sc_if);
  716         else
  717                 sk_rxfilter_yukon(sc_if);
  718 }
  719 
/*
 * Program the GEnesis (XMAC) receive filter from the interface's flags
 * and multicast list.  The first groups are placed in the XMAC's
 * perfect-match filter slots; overflow groups fall back to the 64-bit
 * hash table.  Caller must hold the interface lock.
 */
static void
sk_rxfilter_genesis(sc_if)
        struct sk_if_softc      *sc_if;
{
        struct ifnet            *ifp = sc_if->sk_ifp;
        u_int32_t               hashes[2] = { 0, 0 }, mode;
        int                     h = 0, i;
        struct ifmultiaddr      *ifma;
        u_int16_t               dummy[] = { 0, 0, 0 };
        u_int16_t               maddr[(ETHER_ADDR_LEN+1)/2];

        SK_IF_LOCK_ASSERT(sc_if);

        mode = SK_XM_READ_4(sc_if, XM_MODE);
        mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
            XM_MODE_RX_USE_PERFECT);
        /* First, zot all the existing perfect filters. */
        /* NOTE(review): slot 0 is skipped — presumably the station address. */
        for (i = 1; i < XM_RXFILT_MAX; i++)
                sk_setfilt(sc_if, dummy, i);

        /* Now program new ones. */
        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                /* Accept everything: open the hash table completely. */
                if (ifp->if_flags & IFF_ALLMULTI)
                        mode |= XM_MODE_RX_USE_HASH;
                if (ifp->if_flags & IFF_PROMISC)
                        mode |= XM_MODE_RX_PROMISC;
                hashes[0] = 0xFFFFFFFF;
                hashes[1] = 0xFFFFFFFF;
        } else {
                i = 1;
                if_maddr_rlock(ifp);
                TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
                    ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        /*
                         * Program the first XM_RXFILT_MAX multicast groups
                         * into the perfect filter.
                         */
                        bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                            maddr, ETHER_ADDR_LEN);
                        if (i < XM_RXFILT_MAX) {
                                sk_setfilt(sc_if, maddr, i);
                                mode |= XM_MODE_RX_USE_PERFECT;
                                i++;
                                continue;
                        }
                        /* Out of perfect slots: fold into the hash table. */
                        h = sk_xmchash((const uint8_t *)maddr);
                        if (h < 32)
                                hashes[0] |= (1 << h);
                        else
                                hashes[1] |= (1 << (h - 32));
                        mode |= XM_MODE_RX_USE_HASH;
                }
                if_maddr_runlock(ifp);
        }

        /* Commit mode bits and both 32-bit halves of the hash table. */
        SK_XM_WRITE_4(sc_if, XM_MODE, mode);
        SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
        SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
}
  781 
/*
 * Program the Yukon (GMAC) receive control register and multicast hash
 * from the interface's flags and multicast list.  Caller must hold the
 * interface lock.
 */
static void
sk_rxfilter_yukon(sc_if)
        struct sk_if_softc      *sc_if;
{
        struct ifnet            *ifp;
        u_int32_t               crc, hashes[2] = { 0, 0 }, mode;
        struct ifmultiaddr      *ifma;

        SK_IF_LOCK_ASSERT(sc_if);

        ifp = sc_if->sk_ifp;
        mode = SK_YU_READ_2(sc_if, YUKON_RCR);
        if (ifp->if_flags & IFF_PROMISC)
                /* Promiscuous: drop both unicast and multicast filtering. */
                mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN); 
        else if (ifp->if_flags & IFF_ALLMULTI) {
                /* All-multicast: filter unicast, open the hash completely. */
                mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN; 
                hashes[0] = 0xFFFFFFFF;
                hashes[1] = 0xFFFFFFFF;
        } else {
                /* Normal: filter unicast, hash the joined groups. */
                mode |= YU_RCR_UFLEN;
                if_maddr_rlock(ifp);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
                            ifma->ifma_addr), ETHER_ADDR_LEN);
                        /* Just want the 6 least significant bits. */
                        crc &= 0x3f;
                        /* Set the corresponding bit in the hash table. */
                        hashes[crc >> 5] |= 1 << (crc & 0x1f);
                }
                if_maddr_runlock(ifp);
                /* Only enable multicast filtering if any group hashed in. */
                if (hashes[0] != 0 || hashes[1] != 0)
                        mode |= YU_RCR_MUFLEN;
        }

        /* The 64-bit hash is spread across four 16-bit registers. */
        SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
        SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
        SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
        SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
        SK_YU_WRITE_2(sc_if, YUKON_RCR, mode);
}
  824 
/*
 * Initialize the standard RX descriptor ring: attach an mbuf to every
 * slot, chain each descriptor to its successor (the last wraps back to
 * slot 0), preset the checksum-start offsets, and sync the ring for
 * the device.  Returns ENOBUFS if any mbuf allocation fails.
 */
static int
sk_init_rx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_ring_data	*rd;
	bus_addr_t		addr;
	u_int32_t		csum_start;
	int			i;

	sc_if->sk_cdata.sk_rx_cons = 0;

	/*
	 * Checksum start offsets, one per 16-bit half: low half starts
	 * after the Ethernet header, high half after the Ethernet + IP
	 * headers (assumes no IP options -- NOTE(review): confirm the
	 * hardware recomputes this for frames carrying IP options).
	 */
	csum_start = (ETHER_HDR_LEN + sizeof(struct ip))  << 16 |
	    ETHER_HDR_LEN;
	rd = &sc_if->sk_rdata;
	bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sk_newbuf(sc_if, i) != 0)
			return (ENOBUFS);
		/* Link descriptor i to i+1; the last one wraps to 0. */
		if (i == (SK_RX_RING_CNT - 1))
			addr = SK_RX_RING_ADDR(sc_if, 0);
		else
			addr = SK_RX_RING_ADDR(sc_if, i + 1);
		rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
	}

	/* Push the initialized ring out to the device. */
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return(0);
}
  857 
/*
 * Initialize the jumbo RX descriptor ring.  Identical in structure to
 * sk_init_rx_ring() but uses 9KB jumbo clusters and the jumbo ring's
 * tag/map.  Returns ENOBUFS if any jumbo mbuf allocation fails.
 */
static int
sk_init_jumbo_rx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_ring_data	*rd;
	bus_addr_t		addr;
	u_int32_t		csum_start;
	int			i;

	sc_if->sk_cdata.sk_jumbo_rx_cons = 0;

	/* Checksum starts: high half = after Ethernet+IP headers,
	 * low half = after the Ethernet header. */
	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
	    ETHER_HDR_LEN;
	rd = &sc_if->sk_rdata;
	bzero(rd->sk_jumbo_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
		if (sk_jumbo_newbuf(sc_if, i) != 0)
			return (ENOBUFS);
		/* Link descriptor i to i+1; the last one wraps to 0. */
		if (i == (SK_JUMBO_RX_RING_CNT - 1))
			addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
		else
			addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
		rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
	}

	/* Push the initialized ring out to the device. */
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
  891 
  892 static void
  893 sk_init_tx_ring(sc_if)
  894         struct sk_if_softc      *sc_if;
  895 {
  896         struct sk_ring_data     *rd;
  897         struct sk_txdesc        *txd;
  898         bus_addr_t              addr;
  899         int                     i;
  900 
  901         STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
  902         STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
  903 
  904         sc_if->sk_cdata.sk_tx_prod = 0;
  905         sc_if->sk_cdata.sk_tx_cons = 0;
  906         sc_if->sk_cdata.sk_tx_cnt = 0;
  907 
  908         rd = &sc_if->sk_rdata;
  909         bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
  910         for (i = 0; i < SK_TX_RING_CNT; i++) {
  911                 if (i == (SK_TX_RING_CNT - 1))
  912                         addr = SK_TX_RING_ADDR(sc_if, 0);
  913                 else
  914                         addr = SK_TX_RING_ADDR(sc_if, i + 1);
  915                 rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
  916                 txd = &sc_if->sk_cdata.sk_txdesc[i];
  917                 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
  918         }
  919 
  920         bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
  921             sc_if->sk_cdata.sk_tx_ring_map,
  922             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  923 }
  924 
  925 static __inline void
  926 sk_discard_rxbuf(sc_if, idx)
  927         struct sk_if_softc      *sc_if;
  928         int                     idx;
  929 {
  930         struct sk_rx_desc       *r;
  931         struct sk_rxdesc        *rxd;
  932         struct mbuf             *m;
  933 
  934 
  935         r = &sc_if->sk_rdata.sk_rx_ring[idx];
  936         rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
  937         m = rxd->rx_m;
  938         r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
  939 }
  940 
  941 static __inline void
  942 sk_discard_jumbo_rxbuf(sc_if, idx)
  943         struct sk_if_softc      *sc_if;
  944         int                     idx;
  945 {
  946         struct sk_rx_desc       *r;
  947         struct sk_rxdesc        *rxd;
  948         struct mbuf             *m;
  949 
  950         r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
  951         rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
  952         m = rxd->rx_m;
  953         r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
  954 }
  955 
/*
 * Allocate and DMA-map a fresh cluster mbuf (MCLBYTES) for RX ring
 * slot 'idx'.  On success the descriptor is rewritten to point at the
 * new buffer; on failure the slot's existing buffer (if any) is left
 * untouched and ENOBUFS is returned.
 */
static int
sk_newbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Offset the payload so the IP header lands 32-bit aligned. */
	m_adj(m, ETHER_ALIGN);

	/*
	 * Load the new mbuf into the spare map first, so the slot's
	 * current mapping stays intact if the load fails.
	 */
	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
	    sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Tear down the previous mapping, if this slot had one. */
	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the now-loaded spare map into the slot; the old map
	 * becomes the next spare. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
	sc_if->sk_cdata.sk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	/* Point the descriptor at the new buffer and mark it ready. */
	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}
 1000 
/*
 * Allocate and DMA-map a fresh 9KB jumbo cluster mbuf (MJUM9BYTES)
 * for jumbo RX ring slot 'idx'.  Mirrors sk_newbuf(): the spare map
 * is loaded first so the slot's existing buffer survives any failure;
 * returns ENOBUFS on allocation or mapping failure.
 */
static int
sk_jumbo_newbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	/* Make sure we actually got external cluster storage. */
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m, ETHER_ALIGN);

	/* Load into the spare map first; old mapping survives failure. */
	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Tear down the previous mapping, if this slot had one. */
	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	/* Swap the loaded spare map in; the old map becomes the spare. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
	sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	/* Point the descriptor at the new buffer and mark it ready. */
	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}
 1055 
 1056 /*
 1057  * Set media options.
 1058  */
 1059 static int
 1060 sk_ifmedia_upd(ifp)
 1061         struct ifnet            *ifp;
 1062 {
 1063         struct sk_if_softc      *sc_if = ifp->if_softc;
 1064         struct mii_data         *mii;
 1065 
 1066         mii = device_get_softc(sc_if->sk_miibus);
 1067         sk_init(sc_if);
 1068         mii_mediachg(mii);
 1069 
 1070         return(0);
 1071 }
 1072 
 1073 /*
 1074  * Report current media status.
 1075  */
 1076 static void
 1077 sk_ifmedia_sts(ifp, ifmr)
 1078         struct ifnet            *ifp;
 1079         struct ifmediareq       *ifmr;
 1080 {
 1081         struct sk_if_softc      *sc_if;
 1082         struct mii_data         *mii;
 1083 
 1084         sc_if = ifp->if_softc;
 1085         mii = device_get_softc(sc_if->sk_miibus);
 1086 
 1087         mii_pollstat(mii);
 1088         ifmr->ifm_active = mii->mii_media_active;
 1089         ifmr->ifm_status = mii->mii_media_status;
 1090 
 1091         return;
 1092 }
 1093 
/*
 * Handle interface ioctls: MTU changes, up/down and promiscuous flag
 * transitions, multicast list updates, media selection, and checksum
 * offload capability toggles.  Unrecognized commands are handed to
 * ether_ioctl().  Returns 0 or an errno value.
 */
static int
sk_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error, mask;
	struct mii_data		*mii;

	error = 0;
	switch(command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * Refuse jumbo-sized MTUs when sk_jumbo_disable is
			 * set (presumably because jumbo DMA resources were
			 * unavailable at attach -- verify in
			 * sk_dma_jumbo_alloc()).
			 */
			if (sc_if->sk_jumbo_disable != 0 &&
			    ifr->ifr_mtu > SK_MAX_FRAMELEN)
				error = EINVAL;
			else {
				SK_IF_LOCK(sc_if);
				ifp->if_mtu = ifr->ifr_mtu;
				/* Reinitialize with the new MTU if running. */
				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					sk_init_locked(sc_if);
				}
				SK_IF_UNLOCK(sc_if);
			}
		}
		break;
	case SIOCSIFFLAGS:
		SK_IF_LOCK(sc_if);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/*
				 * Already running: only reprogram the RX
				 * filter when PROMISC/ALLMULTI changed.
				 */
				if ((ifp->if_flags ^ sc_if->sk_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI))
					sk_rxfilter(sc_if);
			} else
				sk_init_locked(sc_if);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember the flags so the next call can detect changes. */
		sc_if->sk_if_flags = ifp->if_flags;
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SK_IF_LOCK(sc_if);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			sk_rxfilter(sc_if);
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media requests are delegated to the MII layer. */
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		SK_IF_LOCK(sc_if);
		/*
		 * GEnesis checksum offload is buggy (see the note in
		 * sk_attach()), so there is nothing to toggle on it.
		 */
		if (sc_if->sk_softc->sk_type == SK_GENESIS) {
			SK_IF_UNLOCK(sc_if);
			break;
		}
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			/* Keep if_hwassist in step with the TXCSUM toggle. */
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= SK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~SK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 
			ifp->if_capenable ^= IFCAP_RXCSUM;
		SK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
 1180 
 1181 /*
 1182  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 1183  * IDs against our list and return a device name if we find a match.
 1184  */
 1185 static int
 1186 skc_probe(dev)
 1187         device_t                dev;
 1188 {
 1189         struct sk_type          *t = sk_devs;
 1190 
 1191         while(t->sk_name != NULL) {
 1192                 if ((pci_get_vendor(dev) == t->sk_vid) &&
 1193                     (pci_get_device(dev) == t->sk_did)) {
 1194                         /*
 1195                          * Only attach to rev. 2 of the Linksys EG1032 adapter.
 1196                          * Rev. 3 is supported by re(4).
 1197                          */
 1198                         if ((t->sk_vid == VENDORID_LINKSYS) &&
 1199                                 (t->sk_did == DEVICEID_LINKSYS_EG1032) &&
 1200                                 (pci_get_subdevice(dev) !=
 1201                                  SUBDEVICEID_LINKSYS_EG1032_REV2)) {
 1202                                 t++;
 1203                                 continue;
 1204                         }
 1205                         device_set_desc(dev, t->sk_name);
 1206                         return (BUS_PROBE_DEFAULT);
 1207                 }
 1208                 t++;
 1209         }
 1210 
 1211         return(ENXIO);
 1212 }
 1213 
 1214 /*
 1215  * Force the GEnesis into reset, then bring it out of reset.
 1216  */
 1217 static void
 1218 sk_reset(sc)
 1219         struct sk_softc         *sc;
 1220 {
 1221 
 1222         CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
 1223         CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
 1224         if (SK_YUKON_FAMILY(sc->sk_type))
 1225                 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
 1226 
 1227         DELAY(1000);
 1228         CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
 1229         DELAY(2);
 1230         CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
 1231         if (SK_YUKON_FAMILY(sc->sk_type))
 1232                 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
 1233 
 1234         if (sc->sk_type == SK_GENESIS) {
 1235                 /* Configure packet arbiter */
 1236                 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
 1237                 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
 1238                 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
 1239                 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
 1240                 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
 1241         }
 1242 
 1243         /* Enable RAM interface */
 1244         sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
 1245 
 1246         /*
 1247          * Configure interrupt moderation. The moderation timer
 1248          * defers interrupts specified in the interrupt moderation
 1249          * timer mask based on the timeout specified in the interrupt
 1250          * moderation timer init register. Each bit in the timer
 1251          * register represents one tick, so to specify a timeout in
 1252          * microseconds, we have to multiply by the correct number of
 1253          * ticks-per-microsecond.
 1254          */
 1255         switch (sc->sk_type) {
 1256         case SK_GENESIS:
 1257                 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
 1258                 break;
 1259         default:
 1260                 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
 1261                 break;
 1262         }
 1263         if (bootverbose)
 1264                 device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
 1265                     sc->sk_int_mod);
 1266         sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
 1267             sc->sk_int_ticks));
 1268         sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
 1269             SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
 1270         sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
 1271 
 1272         return;
 1273 }
 1274 
 1275 static int
 1276 sk_probe(dev)
 1277         device_t                dev;
 1278 {
 1279         struct sk_softc         *sc;
 1280 
 1281         sc = device_get_softc(device_get_parent(dev));
 1282 
 1283         /*
 1284          * Not much to do here. We always know there will be
 1285          * at least one XMAC present, and if there are two,
 1286          * skc_attach() will create a second device instance
 1287          * for us.
 1288          */
 1289         switch (sc->sk_type) {
 1290         case SK_GENESIS:
 1291                 device_set_desc(dev, "XaQti Corp. XMAC II");
 1292                 break;
 1293         case SK_YUKON:
 1294         case SK_YUKON_LITE:
 1295         case SK_YUKON_LP:
 1296                 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
 1297                 break;
 1298         }
 1299 
 1300         return (BUS_PROBE_DEFAULT);
 1301 }
 1302 
 1303 /*
 1304  * Each XMAC chip is attached as a separate logical IP interface.
 1305  * Single port cards will have only one logical interface of course.
 1306  */
/*
 * Per-port attach: wire up the softc, allocate DMA resources, create
 * and configure the ifnet, read (or synthesize) the station address,
 * partition the NIC's RAM buffer between RX and TX, identify the PHY,
 * attach the MII bus, and register with the ethernet layer.  On any
 * failure all partially-acquired resources are released via
 * sk_detach().  Returns 0 or an errno value.
 */
static int
sk_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;
	u_int32_t		r;
	int			error, i, phy, port;
	u_char			eaddr[6];
	u_char			inv_mac[] = {0, 0, 0, 0, 0, 0};

	if (dev == NULL)
		return(EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	/* Which MAC (port A or B) this instance is for. */
	port = *(int *)device_get_ivars(dev);

	sc_if->sk_if_dev = dev;
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	/* Each port drives its own synchronous TX BMU CSR. */
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
	callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);

	if (sk_dma_alloc(sc_if) != 0) {
		error = ENOMEM;
		goto fail;
	}
	/*
	 * Jumbo DMA allocation failure is not fatal here -- presumably
	 * it only disables jumbo frame support (sk_jumbo_disable);
	 * confirm in sk_dma_jumbo_alloc().
	 */
	sk_dma_jumbo_alloc(sc_if);

	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * SK_GENESIS has a bug in checksum offload - From linux.
	 */
	if (sc_if->sk_softc->sk_type != SK_GENESIS) {
		ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
		ifp->if_hwassist = 0;
	} else {
		ifp->if_capabilities = 0;
		ifp->if_hwassist = 0;
	}
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Some revision of Yukon controller generates corrupted
	 * frame when TX checksum offloading is enabled.  The
	 * frame has a valid checksum value so payload might be
	 * modified during TX checksum calculation. Disable TX
	 * checksum offloading but give users chance to enable it
	 * when they know their controller works without problems
	 * with TX checksum offloading.
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_init = sk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	SK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/* Verify whether the station address is invalid or not. */
	if (bcmp(eaddr, inv_mac, sizeof(inv_mac)) == 0) {
		device_printf(sc_if->sk_if_dev,
		    "Generating random ethernet address\n");
		r = arc4random();
		/*
		 * Set OUI to convenient locally assigned address.  'b'
		 * is 0x62, which has the locally assigned bit set, and
		 * the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (r >> 16) & 0xff;
		eaddr[4] = (r >>  8) & 0xff;
		eaddr[5] = (r >>  0) & 0xff;
	}
	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algotithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 *
	 * Just to be contrary, Yukon2 appears to have separate memory
	 * for each MAC.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		/* Single MAC: split RAM in half between RX and TX. */
		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		/* Dual MAC: each port gets a quarter for RX and TX. */
		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	if (!SK_YUKON_FAMILY(sc->sk_type)) {
		/* GEnesis: only XMAC-internal and Broadcom PHYs handled. */
		switch(sc_if->sk_phytype) {
		case SK_PHYTYPE_XMAC:
			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
			break;
		case SK_PHYTYPE_BCOM:
			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
			break;
		default:
			device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
			    sc_if->sk_phytype);
			error = ENODEV;
			SK_IF_UNLOCK(sc_if);
			goto fail;
		}
	} else {
		if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
		    sc->sk_pmd != 'S') {
			/* not initialized, punt */
			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
			sc->sk_coppertype = 1;
		}

		sc_if->sk_phyaddr = SK_PHYADDR_MARV;

		if (!(sc->sk_coppertype))
			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	SK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	SK_IF_LOCK(sc_if);

	/*
	 * The hardware should be ready for VLAN_MTU by default:
	 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
	 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
	 *
	 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	phy = MII_PHY_ANY;
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
			phy = 0;
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		phy = 0;
		break;
	}

	SK_IF_UNLOCK(sc_if);
	error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd,
	    sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Success falls through here with error == 0; cleanup is a no-op. */
fail:
	if (error) {
		/* Access should be ok even though lock has been dropped */
		sc->sk_if[port] = NULL;
		sk_detach(dev);
	}

	return(error);
}
 1539 
 1540 /*
 1541  * Attach the interface. Allocate softc structures, do ifmedia
 1542  * setup and ethernet/BPF attach.
 1543  */
static int
skc_attach(dev)
        device_t                dev;
{
        struct sk_softc         *sc;
        int                     error = 0, *port;
        uint8_t                 skrs;
        const char              *pname = NULL;
        char                    *revstr;

        sc = device_get_softc(dev);
        sc->sk_dev = dev;

        /* Set up mutexes first so teardown via skc_detach() is safe. */
        mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
        /*
         * Map control/status registers.
         */
        pci_enable_busmaster(dev);

        /* Allocate resources */
#ifdef SK_USEIOSPACE
        sc->sk_res_spec = sk_res_spec_io;
#else
        sc->sk_res_spec = sk_res_spec_mem;
#endif
        error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
        if (error) {
                /* Preferred space failed; retry with the other one. */
                if (sc->sk_res_spec == sk_res_spec_mem)
                        sc->sk_res_spec = sk_res_spec_io;
                else
                        sc->sk_res_spec = sk_res_spec_mem;
                error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
                if (error) {
                        device_printf(dev, "couldn't allocate %s resources\n",
                            sc->sk_res_spec == sk_res_spec_mem ? "memory" :
                            "I/O");
                        goto fail;
                }
        }

        /* Read chip type and revision straight from the hardware. */
        sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
        sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;

        /* Bail out if chip is not recognized. */
        if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
                device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
                    sc->sk_type, sc->sk_rev);
                error = ENXIO;
                goto fail;
        }

        /* Export the interrupt moderation timer via sysctl. */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
                &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
                "SK interrupt moderation");

        /* Pull in device tunables. */
        sc->sk_int_mod = SK_IM_DEFAULT;
        error = resource_int_value(device_get_name(dev), device_get_unit(dev),
                "int_mod", &sc->sk_int_mod);
        if (error == 0) {
                if (sc->sk_int_mod < SK_IM_MIN ||
                    sc->sk_int_mod > SK_IM_MAX) {
                        device_printf(dev, "int_mod value out of range; "
                            "using default: %d\n", SK_IM_DEFAULT);
                        sc->sk_int_mod = SK_IM_DEFAULT;
                }
        }

        /* Reset the adapter. */
        sk_reset(sc);

        skrs = sk_win_read_1(sc, SK_EPROM0);
        if (sc->sk_type == SK_GENESIS) {
                /* Read and save RAM size and RAMbuffer offset */
                switch(skrs) {
                case SK_RAMSIZE_512K_64:
                        sc->sk_ramsize = 0x80000;
                        sc->sk_rboff = SK_RBOFF_0;
                        break;
                case SK_RAMSIZE_1024K_64:
                        sc->sk_ramsize = 0x100000;
                        sc->sk_rboff = SK_RBOFF_80000;
                        break;
                case SK_RAMSIZE_1024K_128:
                        sc->sk_ramsize = 0x100000;
                        sc->sk_rboff = SK_RBOFF_0;
                        break;
                case SK_RAMSIZE_2048K_128:
                        sc->sk_ramsize = 0x200000;
                        sc->sk_rboff = SK_RBOFF_0;
                        break;
                default:
                        device_printf(dev, "unknown ram size: %d\n", skrs);
                        error = ENXIO;
                        goto fail;
                }
        } else { /* SK_YUKON_FAMILY */
                /* Yukon: 0 means 128KB (0x20000), else skrs counts 4KB units. */
                if (skrs == 0x00)
                        sc->sk_ramsize = 0x20000;
                else
                        sc->sk_ramsize = skrs * (1<<12);
                sc->sk_rboff = SK_RBOFF_0;
        }

        /* Read and save physical media type */
         sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

         /* PMD types 'T' and '1' indicate copper media. */
         if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
                 sc->sk_coppertype = 1;
         else
                 sc->sk_coppertype = 0;

        /* Determine whether to name it with VPD PN or just make it up.
         * Marvell Yukon VPD PN seems to frequently be bogus. */
        switch (pci_get_device(dev)) {
        case DEVICEID_SK_V1:
        case DEVICEID_BELKIN_5005:
        case DEVICEID_3COM_3C940:
        case DEVICEID_LINKSYS_EG1032:
        case DEVICEID_DLINK_DGE530T_A1:
        case DEVICEID_DLINK_DGE530T_B1:
                /* Stay with VPD PN. */
                (void) pci_get_vpd_ident(dev, &pname);
                break;
        case DEVICEID_SK_V2:
                /* YUKON VPD PN might bear no resemblance to reality. */
                switch (sc->sk_type) {
                case SK_GENESIS:
                        /* Stay with VPD PN. */
                        (void) pci_get_vpd_ident(dev, &pname);
                        break;
                case SK_YUKON:
                        pname = "Marvell Yukon Gigabit Ethernet";
                        break;
                case SK_YUKON_LITE:
                        pname = "Marvell Yukon Lite Gigabit Ethernet";
                        break;
                case SK_YUKON_LP:
                        pname = "Marvell Yukon LP Gigabit Ethernet";
                        break;
                default:
                        pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
                        break;
                }

                /* Yukon Lite Rev. A0 needs special test. */
                if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
                        u_int32_t far;
                        u_int8_t testbyte;

                        /* Save flash address register before testing. */
                        far = sk_win_read_4(sc, SK_EP_ADDR);

                        sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
                        testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);

                        if (testbyte != 0x00) {
                                /* Yukon Lite Rev. A0 detected. */
                                sc->sk_type = SK_YUKON_LITE;
                                sc->sk_rev = SK_YUKON_LITE_REV_A0;
                                /* Restore flash address register. */
                                sk_win_write_4(sc, SK_EP_ADDR, far);
                        }
                }
                break;
        default:
                device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
                        "chipver=%02x, rev=%x\n",
                        pci_get_vendor(dev), pci_get_device(dev),
                        sc->sk_type, sc->sk_rev);
                error = ENXIO;
                goto fail;
        }

        /* Map Yukon Lite revision codes to printable strings. */
        if (sc->sk_type == SK_YUKON_LITE) {
                switch (sc->sk_rev) {
                case SK_YUKON_LITE_REV_A0:
                        revstr = "A0";
                        break;
                case SK_YUKON_LITE_REV_A1:
                        revstr = "A1";
                        break;
                case SK_YUKON_LITE_REV_A3:
                        revstr = "A3";
                        break;
                default:
                        revstr = "";
                        break;
                }
        } else {
                revstr = "";
        }

        /* Announce the product name and more VPD data if there. */
        if (pname != NULL)
                device_printf(dev, "%s rev. %s(0x%x)\n",
                        pname, revstr, sc->sk_rev);

        if (bootverbose) {
                device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
                device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
                device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
                device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
        }

        /* Create a child device for the first MAC port (always present). */
        sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
        if (sc->sk_devs[SK_PORT_A] == NULL) {
                device_printf(dev, "failed to add child for PORT_A\n");
                error = ENXIO;
                goto fail;
        }
        /* The ivar carries the port number for the child's attach routine. */
        port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
        if (port == NULL) {
                device_printf(dev, "failed to allocate memory for "
                    "ivars of PORT_A\n");
                error = ENXIO;
                goto fail;
        }
        *port = SK_PORT_A;
        device_set_ivars(sc->sk_devs[SK_PORT_A], port);

        /* Dual-MAC boards (SINGLEMAC bit clear) get a child for port B too. */
        if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
                sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
                if (sc->sk_devs[SK_PORT_B] == NULL) {
                        device_printf(dev, "failed to add child for PORT_B\n");
                        error = ENXIO;
                        goto fail;
                }
                port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
                if (port == NULL) {
                        device_printf(dev, "failed to allocate memory for "
                            "ivars of PORT_B\n");
                        error = ENXIO;
                        goto fail;
                }
                *port = SK_PORT_B;
                device_set_ivars(sc->sk_devs[SK_PORT_B], port);
        }

        /* Turn on the 'driver is loaded' LED. */
        CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

        error = bus_generic_attach(dev);
        if (error) {
                device_printf(dev, "failed to attach port(s)\n");
                goto fail;
        }

        /* Hook interrupt last to avoid having to lock softc */
        error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
            NULL, sk_intr, sc, &sc->sk_intrhand);

        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                goto fail;
        }

fail:
        /* skc_detach() only releases what was actually set up. */
        if (error)
                skc_detach(dev);

        return(error);
}
 1811 
 1812 /*
 1813  * Shutdown hardware and free up resources. This can be called any
 1814  * time after the mutex has been initialized. It is called in both
 1815  * the error case in attach and the normal detach case so it needs
 1816  * to be careful about only freeing resources that have actually been
 1817  * allocated.
 1818  */
static int
sk_detach(dev)
        device_t                dev;
{
        struct sk_if_softc      *sc_if;
        struct ifnet            *ifp;

        sc_if = device_get_softc(dev);
        KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
            ("sk mutex not initialized in sk_detach"));
        SK_IF_LOCK(sc_if);

        ifp = sc_if->sk_ifp;
        /* These should only be active if attach_xmac succeeded */
        if (device_is_attached(dev)) {
                sk_stop(sc_if);
                /* Can't hold locks while calling detach */
                SK_IF_UNLOCK(sc_if);
                /* Drain callouts before the ifnet goes away. */
                callout_drain(&sc_if->sk_tick_ch);
                callout_drain(&sc_if->sk_watchdog_ch);
                ether_ifdetach(ifp);
                SK_IF_LOCK(sc_if);
        }
        if (ifp)
                if_free(ifp);
        /*
         * We're generally called from skc_detach() which is using
         * device_delete_child() to get to here. It's already trashed
         * miibus for us, so don't do it here or we'll panic.
         */
        /*
        if (sc_if->sk_miibus != NULL)
                device_delete_child(dev, sc_if->sk_miibus);
        */
        bus_generic_detach(dev);
        /* Both free routines tolerate partially-allocated DMA state. */
        sk_dma_jumbo_free(sc_if);
        sk_dma_free(sc_if);
        SK_IF_UNLOCK(sc_if);

        return(0);
}
 1860 
/*
 * Controller detach: delete the per-port child devices (freeing the
 * port-number ivars allocated in skc_attach()), tear down the
 * interrupt handler and release bus resources and mutexes. Also
 * called from skc_attach() on failure, so each step is conditional
 * on the resource actually existing.
 */
static int
skc_detach(dev)
        device_t                dev;
{
        struct sk_softc         *sc;

        sc = device_get_softc(dev);
        KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));

        if (device_is_alive(dev)) {
                if (sc->sk_devs[SK_PORT_A] != NULL) {
                        /* Free the port-number ivar before deleting the child. */
                        free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
                        device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
                }
                if (sc->sk_devs[SK_PORT_B] != NULL) {
                        free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
                        device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
                }
                bus_generic_detach(dev);
        }

        if (sc->sk_intrhand)
                bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
        bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);

        mtx_destroy(&sc->sk_mii_mtx);
        mtx_destroy(&sc->sk_mtx);

        return(0);
}
 1891 
/* Context for sk_dmamap_cb(): receives the bus address of a DMA load. */
struct sk_dmamap_arg {
        bus_addr_t      sk_busaddr;     /* bus address of first segment */
};
 1895 
 1896 static void
 1897 sk_dmamap_cb(arg, segs, nseg, error)
 1898         void                    *arg;
 1899         bus_dma_segment_t       *segs;
 1900         int                     nseg;
 1901         int                     error;
 1902 {
 1903         struct sk_dmamap_arg    *ctx;
 1904 
 1905         if (error != 0)
 1906                 return;
 1907 
 1908         ctx = arg;
 1909         ctx->sk_busaddr = segs[0].ds_addr;
 1910 }
 1911 
 1912 /*
 1913  * Allocate jumbo buffer storage. The SysKonnect adapters support
 1914  * "jumbograms" (9K frames), although SysKonnect doesn't currently
 1915  * use them in their drivers. In order for us to use them, we need
 1916  * large 9K receive buffers, however standard mbuf clusters are only
 1917  * 2048 bytes in size. Consequently, we need to allocate and manage
 1918  * our own jumbo buffer pool. Fortunately, this does not require an
 1919  * excessive amount of additional code.
 1920  */
static int
sk_dma_alloc(sc_if)
        struct sk_if_softc      *sc_if;
{
        struct sk_dmamap_arg    ctx;
        struct sk_txdesc        *txd;
        struct sk_rxdesc        *rxd;
        int                     error, i;

        /* create parent tag */
        /*
         * XXX
         * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
         * in bus_dma_tag_create(9) as the NIC would support DAC mode.
         * However bz@ reported that it does not work on amd64 with > 4GB
         * RAM. Until we have more clues of the breakage, disable DAC mode
         * by limiting DMA address to be in 32bit address space.
         */
        error = bus_dma_tag_create(
                    bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
                    1, 0,                       /* algnmnt, boundary */
                    BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
                    BUS_SPACE_MAXADDR,          /* highaddr */
                    NULL, NULL,                 /* filter, filterarg */
                    BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
                    0,                          /* nsegments */
                    BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
                    0,                          /* flags */
                    NULL, NULL,                 /* lockfunc, lockarg */
                    &sc_if->sk_cdata.sk_parent_tag);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to create parent DMA tag\n");
                goto fail;
        }

        /* create tag for Tx ring */
        error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
                    SK_RING_ALIGN, 0,           /* algnmnt, boundary */
                    BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
                    BUS_SPACE_MAXADDR,          /* highaddr */
                    NULL, NULL,                 /* filter, filterarg */
                    SK_TX_RING_SZ,              /* maxsize */
                    1,                          /* nsegments */
                    SK_TX_RING_SZ,              /* maxsegsize */
                    0,                          /* flags */
                    NULL, NULL,                 /* lockfunc, lockarg */
                    &sc_if->sk_cdata.sk_tx_ring_tag);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to allocate Tx ring DMA tag\n");
                goto fail;
        }

        /* create tag for Rx ring */
        error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
                    SK_RING_ALIGN, 0,           /* algnmnt, boundary */
                    BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
                    BUS_SPACE_MAXADDR,          /* highaddr */
                    NULL, NULL,                 /* filter, filterarg */
                    SK_RX_RING_SZ,              /* maxsize */
                    1,                          /* nsegments */
                    SK_RX_RING_SZ,              /* maxsegsize */
                    0,                          /* flags */
                    NULL, NULL,                 /* lockfunc, lockarg */
                    &sc_if->sk_cdata.sk_rx_ring_tag);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to allocate Rx ring DMA tag\n");
                goto fail;
        }

        /* create tag for Tx buffers */
        error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
                    1, 0,                       /* algnmnt, boundary */
                    BUS_SPACE_MAXADDR,          /* lowaddr */
                    BUS_SPACE_MAXADDR,          /* highaddr */
                    NULL, NULL,                 /* filter, filterarg */
                    MCLBYTES * SK_MAXTXSEGS,    /* maxsize */
                    SK_MAXTXSEGS,               /* nsegments */
                    MCLBYTES,                   /* maxsegsize */
                    0,                          /* flags */
                    NULL, NULL,                 /* lockfunc, lockarg */
                    &sc_if->sk_cdata.sk_tx_tag);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to allocate Tx DMA tag\n");
                goto fail;
        }

        /* create tag for Rx buffers */
        error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
                    1, 0,                       /* algnmnt, boundary */
                    BUS_SPACE_MAXADDR,          /* lowaddr */
                    BUS_SPACE_MAXADDR,          /* highaddr */
                    NULL, NULL,                 /* filter, filterarg */
                    MCLBYTES,                   /* maxsize */
                    1,                          /* nsegments */
                    MCLBYTES,                   /* maxsegsize */
                    0,                          /* flags */
                    NULL, NULL,                 /* lockfunc, lockarg */
                    &sc_if->sk_cdata.sk_rx_tag);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to allocate Rx DMA tag\n");
                goto fail;
        }

        /* allocate DMA'able memory and load the DMA map for Tx ring */
        error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
            (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT |
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_tx_ring_map);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to allocate DMA'able memory for Tx ring\n");
                goto fail;
        }

        /* ctx.sk_busaddr receives the ring's bus address via sk_dmamap_cb(). */
        ctx.sk_busaddr = 0;
        error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
            sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
            SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to load DMA'able memory for Tx ring\n");
                goto fail;
        }
        sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;

        /* allocate DMA'able memory and load the DMA map for Rx ring */
        error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
            (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT |
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_rx_ring_map);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to allocate DMA'able memory for Rx ring\n");
                goto fail;
        }

        ctx.sk_busaddr = 0;
        error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
            sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
            SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to load DMA'able memory for Rx ring\n");
                goto fail;
        }
        sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;

        /* create DMA maps for Tx buffers */
        for (i = 0; i < SK_TX_RING_CNT; i++) {
                txd = &sc_if->sk_cdata.sk_txdesc[i];
                txd->tx_m = NULL;
                txd->tx_dmamap = NULL;
                error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
                    &txd->tx_dmamap);
                if (error != 0) {
                        device_printf(sc_if->sk_if_dev,
                            "failed to create Tx dmamap\n");
                        goto fail;
                }
        }

        /* create DMA maps for Rx buffers */
        if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
            &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to create spare Rx dmamap\n");
                goto fail;
        }
        for (i = 0; i < SK_RX_RING_CNT; i++) {
                rxd = &sc_if->sk_cdata.sk_rxdesc[i];
                rxd->rx_m = NULL;
                rxd->rx_dmamap = NULL;
                error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
                    &rxd->rx_dmamap);
                if (error != 0) {
                        device_printf(sc_if->sk_if_dev,
                            "failed to create Rx dmamap\n");
                        goto fail;
                }
        }

fail:
        /*
         * NOTE(review): on error, partially-created tags/maps are left in
         * place; they are released by sk_dma_free() from the detach path —
         * confirm all attach error paths reach sk_detach().
         */
        return (error);
}
 2108 
/*
 * Allocate DMA resources for the jumbo Rx ring and its 9K buffers.
 * Failure here is not fatal: jumbo support is simply disabled
 * (sk_jumbo_disable is set) and 0 is returned so the attach can
 * proceed with standard-sized frames.
 */
static int
sk_dma_jumbo_alloc(sc_if)
        struct sk_if_softc      *sc_if;
{
        struct sk_dmamap_arg    ctx;
        struct sk_rxdesc        *jrxd;
        int                     error, i;

        /* Honor the "jumbo_disable" tunable. */
        if (jumbo_disable != 0) {
                device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
                sc_if->sk_jumbo_disable = 1;
                return (0);
        }
        /* create tag for jumbo Rx ring */
        error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
                    SK_RING_ALIGN, 0,           /* algnmnt, boundary */
                    BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
                    BUS_SPACE_MAXADDR,          /* highaddr */
                    NULL, NULL,                 /* filter, filterarg */
                    SK_JUMBO_RX_RING_SZ,        /* maxsize */
                    1,                          /* nsegments */
                    SK_JUMBO_RX_RING_SZ,        /* maxsegsize */
                    0,                          /* flags */
                    NULL, NULL,                 /* lockfunc, lockarg */
                    &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to allocate jumbo Rx ring DMA tag\n");
                goto jumbo_fail;
        }

        /* create tag for jumbo Rx buffers */
        error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
                    1, 0,                       /* algnmnt, boundary */
                    BUS_SPACE_MAXADDR,          /* lowaddr */
                    BUS_SPACE_MAXADDR,          /* highaddr */
                    NULL, NULL,                 /* filter, filterarg */
                    MJUM9BYTES,                 /* maxsize */
                    1,                          /* nsegments */
                    MJUM9BYTES,                 /* maxsegsize */
                    0,                          /* flags */
                    NULL, NULL,                 /* lockfunc, lockarg */
                    &sc_if->sk_cdata.sk_jumbo_rx_tag);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to allocate jumbo Rx DMA tag\n");
                goto jumbo_fail;
        }

        /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
        error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
            (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, BUS_DMA_NOWAIT |
            BUS_DMA_COHERENT | BUS_DMA_ZERO,
            &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to allocate DMA'able memory for jumbo Rx ring\n");
                goto jumbo_fail;
        }

        /* ctx.sk_busaddr receives the ring's bus address via sk_dmamap_cb(). */
        ctx.sk_busaddr = 0;
        error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
            sc_if->sk_cdata.sk_jumbo_rx_ring_map,
            sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
            &ctx, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to load DMA'able memory for jumbo Rx ring\n");
                goto jumbo_fail;
        }
        sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;

        /* create DMA maps for jumbo Rx buffers */
        if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
            &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
                device_printf(sc_if->sk_if_dev,
                    "failed to create spare jumbo Rx dmamap\n");
                goto jumbo_fail;
        }
        for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
                jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
                jrxd->rx_m = NULL;
                jrxd->rx_dmamap = NULL;
                error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
                    &jrxd->rx_dmamap);
                if (error != 0) {
                        device_printf(sc_if->sk_if_dev,
                            "failed to create jumbo Rx dmamap\n");
                        goto jumbo_fail;
                }
        }

        return (0);

jumbo_fail:
        /* Non-fatal: release whatever was created and run without jumbos. */
        sk_dma_jumbo_free(sc_if);
        device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
            "resource shortage\n");
        sc_if->sk_jumbo_disable = 1;
        return (0);
}
 2210 
/*
 * Release the Tx/Rx ring and buffer DMA resources created by
 * sk_dma_alloc(). Every handle is checked before use and cleared
 * afterwards, so this is safe on a partially-initialized softc
 * (e.g. from the attach failure path).
 */
static void
sk_dma_free(sc_if)
        struct sk_if_softc      *sc_if;
{
        struct sk_txdesc        *txd;
        struct sk_rxdesc        *rxd;
        int                     i;

        /* Tx ring */
        if (sc_if->sk_cdata.sk_tx_ring_tag) {
                if (sc_if->sk_cdata.sk_tx_ring_map)
                        bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
                            sc_if->sk_cdata.sk_tx_ring_map);
                if (sc_if->sk_cdata.sk_tx_ring_map &&
                    sc_if->sk_rdata.sk_tx_ring)
                        bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
                            sc_if->sk_rdata.sk_tx_ring,
                            sc_if->sk_cdata.sk_tx_ring_map);
                sc_if->sk_rdata.sk_tx_ring = NULL;
                sc_if->sk_cdata.sk_tx_ring_map = NULL;
                bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
                sc_if->sk_cdata.sk_tx_ring_tag = NULL;
        }
        /* Rx ring */
        if (sc_if->sk_cdata.sk_rx_ring_tag) {
                if (sc_if->sk_cdata.sk_rx_ring_map)
                        bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
                            sc_if->sk_cdata.sk_rx_ring_map);
                if (sc_if->sk_cdata.sk_rx_ring_map &&
                    sc_if->sk_rdata.sk_rx_ring)
                        bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
                            sc_if->sk_rdata.sk_rx_ring,
                            sc_if->sk_cdata.sk_rx_ring_map);
                sc_if->sk_rdata.sk_rx_ring = NULL;
                sc_if->sk_cdata.sk_rx_ring_map = NULL;
                bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
                sc_if->sk_cdata.sk_rx_ring_tag = NULL;
        }
        /* Tx buffers */
        if (sc_if->sk_cdata.sk_tx_tag) {
                for (i = 0; i < SK_TX_RING_CNT; i++) {
                        txd = &sc_if->sk_cdata.sk_txdesc[i];
                        if (txd->tx_dmamap) {
                                bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
                                    txd->tx_dmamap);
                                txd->tx_dmamap = NULL;
                        }
                }
                bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
                sc_if->sk_cdata.sk_tx_tag = NULL;
        }
        /* Rx buffers */
        if (sc_if->sk_cdata.sk_rx_tag) {
                for (i = 0; i < SK_RX_RING_CNT; i++) {
                        rxd = &sc_if->sk_cdata.sk_rxdesc[i];
                        if (rxd->rx_dmamap) {
                                bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
                                    rxd->rx_dmamap);
                                rxd->rx_dmamap = NULL;
                        }
                }
                if (sc_if->sk_cdata.sk_rx_sparemap) {
                        bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
                            sc_if->sk_cdata.sk_rx_sparemap);
                        sc_if->sk_cdata.sk_rx_sparemap = NULL;
                }
                bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
                sc_if->sk_cdata.sk_rx_tag = NULL;
        }

        /* Destroy the parent tag last, after all children are gone. */
        if (sc_if->sk_cdata.sk_parent_tag) {
                bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
                sc_if->sk_cdata.sk_parent_tag = NULL;
        }
}
 2286 
/*
 * Release all DMA resources used by the jumbo Rx path: the jumbo Rx
 * descriptor ring, the per-buffer DMA maps (including the spare map used
 * for buffer replacement), and finally the bus_dma tags themselves.
 * For the ring the teardown order is: unload map -> free DMA memory ->
 * destroy tag, as required by bus_dma(9).
 */
static void
sk_dma_jumbo_free(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_rxdesc	*jrxd;
	int			i;

	/* jumbo Rx ring */
	if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
		if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
		if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
		    sc_if->sk_rdata.sk_jumbo_rx_ring)
			bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
			    sc_if->sk_rdata.sk_jumbo_rx_ring,
			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
		sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
		sc_if->sk_cdata.sk_jumbo_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
		sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
	}

	/* jumbo Rx buffers */
	if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
		/* Destroy every per-descriptor DMA map. */
		for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
			jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
			if (jrxd->rx_dmamap) {
				bus_dmamap_destroy(
				    sc_if->sk_cdata.sk_jumbo_rx_tag,
				    jrxd->rx_dmamap);
				jrxd->rx_dmamap = NULL;
			}
		}
		/* The spare map used while swapping in new Rx buffers. */
		if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
			bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
			    sc_if->sk_cdata.sk_jumbo_rx_sparemap);
			sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
		sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
	}
}
 2330 
 2331 static void
 2332 sk_txcksum(ifp, m, f)
 2333         struct ifnet            *ifp;
 2334         struct mbuf             *m;
 2335         struct sk_tx_desc       *f;
 2336 {
 2337         struct ip               *ip;
 2338         u_int16_t               offset;
 2339         u_int8_t                *p;
 2340 
 2341         offset = sizeof(struct ip) + ETHER_HDR_LEN;
 2342         for(; m && m->m_len == 0; m = m->m_next)
 2343                 ;
 2344         if (m == NULL || m->m_len < ETHER_HDR_LEN) {
 2345                 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
 2346                 /* checksum may be corrupted */
 2347                 goto sendit;
 2348         }
 2349         if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
 2350                 if (m->m_len != ETHER_HDR_LEN) {
 2351                         if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
 2352                             __func__);
 2353                         /* checksum may be corrupted */
 2354                         goto sendit;
 2355                 }
 2356                 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
 2357                         ;
 2358                 if (m == NULL) {
 2359                         offset = sizeof(struct ip) + ETHER_HDR_LEN;
 2360                         /* checksum may be corrupted */
 2361                         goto sendit;
 2362                 }
 2363                 ip = mtod(m, struct ip *);
 2364         } else {
 2365                 p = mtod(m, u_int8_t *);
 2366                 p += ETHER_HDR_LEN;
 2367                 ip = (struct ip *)p;
 2368         }
 2369         offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
 2370 
 2371 sendit:
 2372         f->sk_csum_startval = 0;
 2373         f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
 2374             (offset << 16));
 2375 }
 2376 
/*
 * Load one outgoing frame into the Tx descriptor ring.
 *
 * The mbuf chain in *m_head is DMA-mapped (compacted once with
 * m_defrag() if it has too many segments), one descriptor is filled per
 * DMA segment, and the OWN bit of the first descriptor is set last so
 * the NIC never sees a partially built frame.
 *
 * Returns 0 on success.  On ENOBUFS the mbuf is left intact for the
 * caller to re-queue; on other errors the mbuf is freed and *m_head is
 * set to NULL.
 */
static int
sk_encap(sc_if, m_head)
	struct sk_if_softc	*sc_if;
	struct mbuf		**m_head;
{
	struct sk_txdesc	*txd;
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[SK_MAXTXSEGS];
	u_int32_t		cflags, frag, si, sk_ctl;
	int			error, i, nseg;

	SK_IF_LOCK_ASSERT(sc_if);

	/* Need a free Tx descriptor/map pair before anything else. */
	if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}
	/* '>=' keeps at least one descriptor in the ring unused. */
	if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	/* Request hardware checksumming only if the packet asks for it. */
	if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
		cflags = SK_OPCODE_CSUM;
	else
		cflags = SK_OPCODE_DEFAULT;
	/* 'si' remembers the first descriptor; 'frag' walks the ring. */
	si = frag = sc_if->sk_cdata.sk_tx_prod;
	for (i = 0; i < nseg; i++) {
		f = &sc_if->sk_rdata.sk_tx_ring[frag];
		f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
		f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
		sk_ctl = txsegs[i].ds_len | cflags;
		if (i == 0) {
			/*
			 * First descriptor: program checksum offsets and
			 * withhold OWN until the whole frame is built.
			 */
			if (cflags == SK_OPCODE_CSUM)
				sk_txcksum(sc_if->sk_ifp, m, f);
			sk_ctl |= SK_TXCTL_FIRSTFRAG;
		} else
			sk_ctl |= SK_TXCTL_OWN;
		f->sk_ctl = htole32(sk_ctl);
		sc_if->sk_cdata.sk_tx_cnt++;
		SK_INC(frag, SK_TX_RING_CNT);
	}
	sc_if->sk_cdata.sk_tx_prod = frag;

	/* set EOF on the last descriptor */
	frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
	f = &sc_if->sk_rdata.sk_tx_ring[frag];
	f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);

	/* turn the first descriptor ownership to NIC */
	f = &sc_if->sk_rdata.sk_tx_ring[si];
	f->sk_ctl |= htole32(SK_TXCTL_OWN);

	/* Move the descriptor from the free queue to the busy queue. */
	STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* sync descriptors */
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
 2468 
 2469 static void
 2470 sk_start(ifp)
 2471         struct ifnet            *ifp;
 2472 {
 2473         struct sk_if_softc *sc_if;
 2474 
 2475         sc_if = ifp->if_softc;
 2476 
 2477         SK_IF_LOCK(sc_if);
 2478         sk_start_locked(ifp);
 2479         SK_IF_UNLOCK(sc_if);
 2480 
 2481         return;
 2482 }
 2483 
/*
 * Transmit scheduler, interface lock held.  Drain the ifnet send queue
 * into the Tx ring via sk_encap(), then kick the Tx BMU and arm the
 * watchdog if anything was enqueued.
 */
static void
sk_start_locked(ifp)
	struct ifnet		*ifp;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head;
	int			enq;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK_ASSERT(sc_if);

	/* Stop once the ring lacks room for at least one more frame. */
	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, &m_head)) {
			/* m_head is NULL when sk_encap() already freed it. */
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		sc_if->sk_watchdog_timer = 5;
	}
}
 2533 
 2534 
 2535 static void
 2536 sk_watchdog(arg)
 2537         void                    *arg;
 2538 {
 2539         struct sk_if_softc      *sc_if;
 2540         struct ifnet            *ifp;
 2541 
 2542         ifp = arg;
 2543         sc_if = ifp->if_softc;
 2544 
 2545         SK_IF_LOCK_ASSERT(sc_if);
 2546 
 2547         if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
 2548                 goto done;
 2549 
 2550         /*
 2551          * Reclaim first as there is a possibility of losing Tx completion
 2552          * interrupts.
 2553          */
 2554         sk_txeof(sc_if);
 2555         if (sc_if->sk_cdata.sk_tx_cnt != 0) {
 2556                 if_printf(sc_if->sk_ifp, "watchdog timeout\n");
 2557                 ifp->if_oerrors++;
 2558                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2559                 sk_init_locked(sc_if);
 2560         }
 2561 
 2562 done:
 2563         callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
 2564 
 2565         return;
 2566 }
 2567 
 2568 static int
 2569 skc_shutdown(dev)
 2570         device_t                dev;
 2571 {
 2572         struct sk_softc         *sc;
 2573 
 2574         sc = device_get_softc(dev);
 2575         SK_LOCK(sc);
 2576 
 2577         /* Turn off the 'driver is loaded' LED. */
 2578         CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
 2579 
 2580         /*
 2581          * Reset the GEnesis controller. Doing this should also
 2582          * assert the resets on the attached XMAC(s).
 2583          */
 2584         sk_reset(sc);
 2585         SK_UNLOCK(sc);
 2586 
 2587         return (0);
 2588 }
 2589 
 2590 static int
 2591 skc_suspend(dev)
 2592         device_t                dev;
 2593 {
 2594         struct sk_softc         *sc;
 2595         struct sk_if_softc      *sc_if0, *sc_if1;
 2596         struct ifnet            *ifp0 = NULL, *ifp1 = NULL;
 2597 
 2598         sc = device_get_softc(dev);
 2599 
 2600         SK_LOCK(sc);
 2601 
 2602         sc_if0 = sc->sk_if[SK_PORT_A];
 2603         sc_if1 = sc->sk_if[SK_PORT_B];
 2604         if (sc_if0 != NULL)
 2605                 ifp0 = sc_if0->sk_ifp;
 2606         if (sc_if1 != NULL)
 2607                 ifp1 = sc_if1->sk_ifp;
 2608         if (ifp0 != NULL)
 2609                 sk_stop(sc_if0);
 2610         if (ifp1 != NULL)
 2611                 sk_stop(sc_if1);
 2612         sc->sk_suspended = 1;
 2613 
 2614         SK_UNLOCK(sc);
 2615 
 2616         return (0);
 2617 }
 2618 
 2619 static int
 2620 skc_resume(dev)
 2621         device_t                dev;
 2622 {
 2623         struct sk_softc         *sc;
 2624         struct sk_if_softc      *sc_if0, *sc_if1;
 2625         struct ifnet            *ifp0 = NULL, *ifp1 = NULL;
 2626 
 2627         sc = device_get_softc(dev);
 2628 
 2629         SK_LOCK(sc);
 2630 
 2631         sc_if0 = sc->sk_if[SK_PORT_A];
 2632         sc_if1 = sc->sk_if[SK_PORT_B];
 2633         if (sc_if0 != NULL)
 2634                 ifp0 = sc_if0->sk_ifp;
 2635         if (sc_if1 != NULL)
 2636                 ifp1 = sc_if1->sk_ifp;
 2637         if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
 2638                 sk_init_locked(sc_if0);
 2639         if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
 2640                 sk_init_locked(sc_if1);
 2641         sc->sk_suspended = 0;
 2642 
 2643         SK_UNLOCK(sc);
 2644 
 2645         return (0);
 2646 }
 2647 
 2648 /*
 2649  * According to the data sheet from SK-NET GENESIS the hardware can compute
 2650  * two Rx checksums at the same time(Each checksum start position is
 2651  * programmed in Rx descriptors). However it seems that TCP/UDP checksum
 * does not work at least on my Yukon hardware. I tried every possible way
 2653  * to get correct checksum value but couldn't get correct one. So TCP/UDP
 2654  * checksum offload was disabled at the moment and only IP checksum offload
 2655  * was enabled.
 * As normal IP header size is 20 bytes I can't expect it would give an
 2657  * increase in throughput. However it seems it doesn't hurt performance in
 2658  * my testing. If there is a more detailed information for checksum secret
 2659  * of the hardware in question please contact yongari@FreeBSD.org to add
 2660  * TCP/UDP checksum offload support.
 2661  */
/*
 * Validate the hardware-computed Rx checksum and, for a plain IPv4
 * frame with no options, mark the IP header checksum as checked (and
 * valid when it verifies) in the mbuf packet header.  See the block
 * comment above for why only IP checksum offload is attempted.
 */
static __inline void
sk_rxcksum(ifp, m, csum)
	struct ifnet		*ifp;
	struct mbuf		*m;
	u_int32_t		csum;
{
	struct ether_header	*eh;
	struct ip		*ip;
	int32_t			hlen, len, pktlen;
	u_int16_t		csum1, csum2, ipcsum;

	pktlen = m->m_pkthdr.len;
	/* Must at least hold an Ethernet header plus an IP header. */
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	/* IPv4 only; ether_type is compared in network byte order. */
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;
	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	/* Reject malformed header lengths. */
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	/* Reject frames with trailing garbage or truncation. */
	if (ntohs(ip->ip_len) != pktlen)
		return;

	/* The chip delivers two 16-bit partial sums packed in 'csum'. */
	csum1 = htons(csum & 0xffff);
	csum2 = htons((csum >> 16) & 0xffff);
	ipcsum = in_addword(csum1, ~csum2 & 0xffff);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		/*
		 * If the second checksum value is correct we can compute IP
		 * checksum with simple math. Unfortunately the second checksum
		 * value is wrong so we can't verify the checksum from the
		 * value(It seems there is some magic here to get correct
		 * value). If the second checksum value is correct it also
		 * means we can get TCP/UDP checksum) here. However, it still
		 * needs pseudo header checksum calculation due to hardware
		 * limitations.
		 */
		return;
	}
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
	/* A correct ones-complement checksum folds to 0xffff. */
	if (ipcsum == 0xffff)
		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
}
 2713 
 2714 static __inline int
 2715 sk_rxvalid(sc, stat, len)
 2716         struct sk_softc         *sc;
 2717         u_int32_t               stat, len;
 2718 {
 2719 
 2720         if (sc->sk_type == SK_GENESIS) {
 2721                 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
 2722                     XM_RXSTAT_BYTES(stat) != len)
 2723                         return (0);
 2724         } else {
 2725                 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
 2726                     YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
 2727                     YU_RXSTAT_JABBER)) != 0 ||
 2728                     (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
 2729                     YU_RXSTAT_BYTES(stat) != len)
 2730                         return (0);
 2731         }
 2732 
 2733         return (1);
 2734 }
 2735 
/*
 * Standard-MTU receive handler.  Walk the Rx ring from the current
 * consumer index until a descriptor still owned by the NIC is reached.
 * Good frames get a fresh buffer via sk_newbuf() and are handed to the
 * stack; bad frames (or frames whose replacement buffer allocation
 * fails) keep their old buffer via sk_discard_rxbuf() and are counted
 * as input errors or drops respectively.
 */
static void
sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_rx_desc	*cur_rx;
	struct sk_rxdesc	*rxd;
	int			cons, prog;
	u_int32_t		csum, rxstat, sk_ctl;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	SK_IF_LOCK_ASSERT(sc_if);

	/* Make the NIC's descriptor updates visible to the CPU. */
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
	    prog++, SK_INC(cons, SK_RX_RING_CNT)) {
		cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
		sk_ctl = le32toh(cur_rx->sk_ctl);
		/* Descriptor still owned by the NIC: we're caught up. */
		if ((sk_ctl & SK_RXCTL_OWN) != 0)
			break;
		rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
		rxstat = le32toh(cur_rx->sk_xmac_rxstat);

		/*
		 * Accept only frames fully contained in one descriptor
		 * (FIRSTFRAG and LASTFRAG both set) with valid status, a
		 * length within [SK_MIN_FRAMELEN, SK_MAX_FRAMELEN], and a
		 * clean MAC-level Rx status.
		 */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
		    SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
			ifp->if_ierrors++;
			sk_discard_rxbuf(sc_if, cons);
			continue;
		}

		/* Grab the mbuf before its slot gets a fresh buffer. */
		m = rxd->rx_m;
		csum = le32toh(cur_rx->sk_csum);
		if (sk_newbuf(sc_if, cons) != 0) {
			ifp->if_iqdrops++;
			/* reuse old buffer */
			sk_discard_rxbuf(sc_if, cons);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
		ifp->if_ipackets++;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			sk_rxcksum(ifp, m, csum);
		/* Don't hold the driver lock while inside the stack. */
		SK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		SK_IF_LOCK(sc_if);
	}

	if (prog > 0) {
		/* Record progress and push our descriptor updates out. */
		sc_if->sk_cdata.sk_rx_cons = cons;
		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
		    sc_if->sk_cdata.sk_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
 2802 
/*
 * Jumbo-frame receive handler.  Identical in structure to sk_rxeof()
 * but operates on the jumbo Rx ring: the length ceiling is
 * SK_JUMBO_FRAMELEN and buffers are replenished with
 * sk_jumbo_newbuf() / recycled with sk_discard_jumbo_rxbuf().
 */
static void
sk_jumbo_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_rx_desc	*cur_rx;
	struct sk_rxdesc	*jrxd;
	int			cons, prog;
	u_int32_t		csum, rxstat, sk_ctl;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	SK_IF_LOCK_ASSERT(sc_if);

	/* Make the NIC's descriptor updates visible to the CPU. */
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
	    prog < SK_JUMBO_RX_RING_CNT;
	    prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
		cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
		sk_ctl = le32toh(cur_rx->sk_ctl);
		/* Descriptor still owned by the NIC: we're caught up. */
		if ((sk_ctl & SK_RXCTL_OWN) != 0)
			break;
		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
		rxstat = le32toh(cur_rx->sk_xmac_rxstat);

		/*
		 * Accept only single-descriptor frames with valid status,
		 * a sane length, and a clean MAC-level Rx status; see
		 * sk_rxeof() for the same logic on the standard ring.
		 */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
		    SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
			ifp->if_ierrors++;
			sk_discard_jumbo_rxbuf(sc_if, cons);
			continue;
		}

		/* Grab the mbuf before its slot gets a fresh buffer. */
		m = jrxd->rx_m;
		csum = le32toh(cur_rx->sk_csum);
		if (sk_jumbo_newbuf(sc_if, cons) != 0) {
			ifp->if_iqdrops++;
			/* reuse old buffer */
			sk_discard_jumbo_rxbuf(sc_if, cons);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
		ifp->if_ipackets++;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			sk_rxcksum(ifp, m, csum);
		/* Don't hold the driver lock while inside the stack. */
		SK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		SK_IF_LOCK(sc_if);
	}

	if (prog > 0) {
		/* Record progress and push our descriptor updates out. */
		sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
		    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
 2870 
/*
 * Transmit completion handler.  Walk the Tx ring from the consumer
 * index, reclaiming descriptors the NIC has released (OWN clear):
 * free the transmitted mbuf at each LASTFRAG descriptor, return its
 * DMA map to the free queue, clear OACTIVE as room opens up, and
 * disarm the watchdog once the ring drains completely.
 */
static void
sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct sk_txdesc	*txd;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx, sk_ctl;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	/* Nothing in flight: nothing to reclaim. */
	txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
	if (txd == NULL)
		return;
	/* See the NIC's descriptor ownership updates. */
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
		if (sc_if->sk_cdata.sk_tx_cnt <= 0)
			break;
		cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
		sk_ctl = le32toh(cur_tx->sk_ctl);
		/* Stop at the first descriptor the NIC still owns. */
		if (sk_ctl & SK_TXCTL_OWN)
			break;
		sc_if->sk_cdata.sk_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* Only the LASTFRAG descriptor carries the mbuf to free. */
		if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
			continue;
		bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);

		ifp->if_opackets++;
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		/* Recycle the map from the busy queue to the free queue. */
		STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
	}
	sc_if->sk_cdata.sk_tx_cons = idx;
	/* Keep the watchdog armed only while frames remain queued. */
	sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
 2922 
/*
 * XMAC link-recovery poll timer (GENESIS), rearmed from sk_intr_xmac()
 * when the link drops.  Boards with a Broadcom PHY are redirected to
 * sk_intr_bcom().  Otherwise the GP0 (link_sync) GPIO bit is sampled
 * three times; if it reads the same (clear) all three times the link is
 * deemed stable, so the GP0 interrupt is re-enabled and polling stops.
 * Any other reading reschedules the poll one second later.
 */
static void
sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	ifp = sc_if->sk_ifp;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Nothing to do while the interface is administratively down. */
	if (!(ifp->if_flags & IFF_UP))
		return;

	/* Broadcom PHYs have their own service routine. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link still unstable: poll again in one second. */
	if (i != 3) {
		callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	/* Dummy read of the ISR; result discarded. */
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	callout_stop(&sc_if->sk_tick_ch);
}
 2967 
 2968 static void
 2969 sk_yukon_tick(xsc_if)
 2970         void                    *xsc_if;
 2971 {
 2972         struct sk_if_softc      *sc_if;
 2973         struct mii_data         *mii;
 2974 
 2975         sc_if = xsc_if;
 2976         mii = device_get_softc(sc_if->sk_miibus);
 2977 
 2978         mii_tick(mii);
 2979         callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
 2980 }
 2981 
/*
 * Broadcom PHY service routine (GENESIS boards with a BCOM PHY),
 * reached from the interrupt path and from sk_tick().  Pauses XMAC
 * Rx/Tx while reading (and thereby acknowledging) the PHY interrupt
 * status, tracks link up/down transitions and drives the link LED,
 * then re-enables the MAC.
 */
static void
sk_intr_bcom(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			status;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = sc_if->sk_ifp;

	/* Pause the MAC while we poke the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* Interface not running: reinitialize the XMAC and bail. */
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int			lstat;
		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: restart media selection. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link change: reprogram the PHY interrupt mask. */
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
			    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* No link transition yet: keep polling. */
			mii_tick(mii);
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}
	}

	/* Resume the MAC. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
 3035 
/*
 * XMAC interrupt service (GENESIS).  On link loss (GP0) or autoneg
 * completion with the built-in XMAC PHY, start the sk_tick() poll
 * timer to watch for link resync (masking GP0 until sk_tick()
 * re-enables it); also flushes the Tx/Rx FIFOs on underrun/overrun.
 */
static void
sk_intr_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	u_int16_t		status;

	sc = sc_if->sk_softc;
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick() confirms the link. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}
	}

	/* Recover FIFO error conditions by flushing the affected FIFO. */
	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	/* Dummy re-read of the ISR; result discarded. */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}
 3071 
 3072 static void
 3073 sk_intr_yukon(sc_if)
 3074         struct sk_if_softc      *sc_if;
 3075 {
 3076         u_int8_t status;
 3077 
 3078         status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
 3079         /* RX overrun */
 3080         if ((status & SK_GMAC_INT_RX_OVER) != 0) {
 3081                 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
 3082                     SK_RFCTL_RX_FIFO_OVER);
 3083         }
 3084         /* TX underrun */
 3085         if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
 3086                 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
 3087                     SK_TFCTL_TX_FIFO_UNDER);
 3088         }
 3089 }
 3090 
 3091 static void
 3092 sk_intr(xsc)
 3093         void                    *xsc;
 3094 {
 3095         struct sk_softc         *sc = xsc;
 3096         struct sk_if_softc      *sc_if0, *sc_if1;
 3097         struct ifnet            *ifp0 = NULL, *ifp1 = NULL;
 3098         u_int32_t               status;
 3099 
 3100         SK_LOCK(sc);
 3101 
 3102         status = CSR_READ_4(sc, SK_ISSR);
 3103         if (status == 0 || status == 0xffffffff || sc->sk_suspended)
 3104                 goto done_locked;
 3105 
 3106         sc_if0 = sc->sk_if[SK_PORT_A];
 3107         sc_if1 = sc->sk_if[SK_PORT_B];
 3108 
 3109         if (sc_if0 != NULL)
 3110                 ifp0 = sc_if0->sk_ifp;
 3111         if (sc_if1 != NULL)
 3112                 ifp1 = sc_if1->sk_ifp;
 3113 
 3114         for (; (status &= sc->sk_intrmask) != 0;) {
 3115                 /* Handle receive interrupts first. */
 3116                 if (status & SK_ISR_RX1_EOF) {
 3117                         if (ifp0->if_mtu > SK_MAX_FRAMELEN)
 3118                                 sk_jumbo_rxeof(sc_if0);
 3119                         else
 3120                                 sk_rxeof(sc_if0);
 3121                         CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
 3122                             SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
 3123                 }
 3124                 if (status & SK_ISR_RX2_EOF) {
 3125                         if (ifp1->if_mtu > SK_MAX_FRAMELEN)
 3126                                 sk_jumbo_rxeof(sc_if1);
 3127                         else
 3128                                 sk_rxeof(sc_if1);
 3129                         CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
 3130                             SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
 3131                 }
 3132 
 3133                 /* Then transmit interrupts. */
 3134                 if (status & SK_ISR_TX1_S_EOF) {
 3135                         sk_txeof(sc_if0);
 3136                         CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
 3137                 }
 3138                 if (status & SK_ISR_TX2_S_EOF) {
 3139                         sk_txeof(sc_if1);
 3140                         CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
 3141                 }
 3142 
 3143                 /* Then MAC interrupts. */
 3144                 if (status & SK_ISR_MAC1 &&
 3145                     ifp0->if_drv_flags & IFF_DRV_RUNNING) {
 3146                         if (sc->sk_type == SK_GENESIS)
 3147                                 sk_intr_xmac(sc_if0);
 3148                         else
 3149                                 sk_intr_yukon(sc_if0);
 3150                 }
 3151 
 3152                 if (status & SK_ISR_MAC2 &&
 3153                     ifp1->if_drv_flags & IFF_DRV_RUNNING) {
 3154                         if (sc->sk_type == SK_GENESIS)
 3155                                 sk_intr_xmac(sc_if1);
 3156                         else
 3157                                 sk_intr_yukon(sc_if1);
 3158                 }
 3159 
 3160                 if (status & SK_ISR_EXTERNAL_REG) {
 3161                         if (ifp0 != NULL &&
 3162                             sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
 3163                                 sk_intr_bcom(sc_if0);
 3164                         if (ifp1 != NULL &&
 3165                             sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
 3166                                 sk_intr_bcom(sc_if1);
 3167                 }
 3168                 status = CSR_READ_4(sc, SK_ISSR);
 3169         }
 3170 
 3171         CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
 3172 
 3173         if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
 3174                 sk_start_locked(ifp0);
 3175         if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
 3176                 sk_start_locked(ifp1);
 3177 
 3178 done_locked:
 3179         SK_UNLOCK(sc);
 3180 }
 3181 
/*
 * Bring the XMAC-II MAC (SK-NET GENESIS) out of reset and program it
 * for operation: station address, broadcast/FCS/padding policy, jumbo
 * frame handling, RX filter, interrupt masks and the MAC arbiter.
 * Must be called with the per-interface lock held.
 */
static void
sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		eaddr[(ETHER_ADDR_LEN+1)/2];
	/* Magic register/value pairs for the early-BCM5400 workaround below. */
	struct sk_bcom_hack	bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	SK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Software-reset the PHY, then program its interrupt mask. */
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
	SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
	SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
	SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Honor (or suppress) reception of broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
		    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
		    XM_MODE_RX_INRANGELEN);
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	} else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set Rx filter */
	sk_rxfilter_genesis(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		/* External PHY: mask all XMAC interrupts. */
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter (values depend on the XMAC revision). */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	/* Assume link is up until an interrupt tells us otherwise. */
	sc_if->sk_link = 1;

	return;
}
 3343 
/*
 * Bring the GMAC/GPHY (Yukon family) out of reset and program it for
 * operation: PHY mode, MIB counter clearing, serial/jumbo mode,
 * station addresses, RX filter and the MAC FIFOs.  Must be called
 * with the per-interface lock held.
 */
static void
sk_init_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	u_int32_t		phy, v;
	u_int16_t		reg;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	u_int8_t		*eaddr;
	int			i;

	SK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, set PHY reset.
		 * Otherwise it will not correctly take chip out of
		 * powerdown (coma)
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, clear PHY reset
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9;
		v &= ~SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	/* Build the GPHY control word for the attached media type. */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	if (sc->sk_coppertype)
		phy |= SK_GPHY_COPPER;
	else
		phy |= SK_GPHY_FIBER;

	/* Pulse the PHY reset with the chosen configuration. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	/* unused read of the interrupt source register */
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	reg = SK_YU_READ_2(sc_if, YUKON_PAR);

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register; enable jumbo frames when MTU requires it */
	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
	if (ifp->if_mtu > SK_MAX_FRAMELEN)
		reg |= YU_SMR_MFL_JUMBO;
	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	/* Setup Yukon's station address (written 16 bits at a time, LE) */
	eaddr = IF_LLADDR(sc_if->sk_ifp);
	for (i = 0; i < 3; i++)
		SK_YU_WRITE_2(sc_if, SK_MAC0_0 + i * 4,
		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
	/* Set GMAC source address of flow control. */
	for (i = 0; i < 3; i++)
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
	/* Set GMAC virtual address. */
	for (i = 0; i < 3; i++)
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4,
		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);

	/* Set Rx filter */
	sk_rxfilter_yukon(sc_if);

	/* enable interrupt mask for counter overflows */
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
		v = SK_TFCTL_OPERATION_ON;
	else
		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}
 3473 
 3474 /*
 3475  * Note that to properly initialize any part of the GEnesis chip,
 3476  * you first have to take it out of reset mode.
 3477  */
 3478 static void
 3479 sk_init(xsc)
 3480         void                    *xsc;
 3481 {
 3482         struct sk_if_softc      *sc_if = xsc;
 3483 
 3484         SK_IF_LOCK(sc_if);
 3485         sk_init_locked(sc_if);
 3486         SK_IF_UNLOCK(sc_if);
 3487 
 3488         return;
 3489 }
 3490 
/*
 * Initialize the interface: stop any pending activity, program the
 * MAC (XMAC or GMAC depending on chip type), FIFOs, RAM buffers and
 * BMUs, set up the descriptor rings, enable interrupts and start the
 * watchdog/tick callouts.  No-op if the interface is already running.
 * Must be called with the per-interface lock held.
 */
static void
sk_init_locked(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;
	u_int16_t		reg;
	u_int32_t		imr;
	int			error;

	SK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->sk_ifp;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			SK_TXLEDCTL_COUNTER_START);
	}

	/*
	 * Configure descriptor poll timer
	 *
	 * SK-NET GENESIS data sheet says that possibility of losing Start
	 * transmit command due to CPU/cache related interim storage problems
	 * under certain conditions. The document recommends a polling
	 * mechanism to send a Start transmit command to initiate transfer
	 * of ready descriptors regularly. To cope with this issue sk(4) now
	 * enables descriptor poll timer to initiate descriptor processing
	 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
	 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx
	 * command instead of waiting for next descriptor polling time.
	 * The same rule may apply to Rx side too but it seems that is not
	 * needed at the moment.
	 * Since sk(4) uses descriptor polling as a last resort there is no
	 * need to set smaller polling time than maximum allowable one.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers (read/write pointers start at ram start) */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs; the ring base depends on standard vs jumbo MTU */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
		    SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
		    SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
	} else {
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
		    SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
		    SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
	}

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
	    SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));

	/* Init descriptors */
	if (ifp->if_mtu > SK_MAX_FRAMELEN)
		error = sk_init_jumbo_rx_ring(sc_if);
	else
		error = sk_init_rx_ring(sc_if);
	if (error != 0) {
		/* Ring setup failed (mbuf shortage); leave interface down. */
		device_printf(sc_if->sk_if_dev,
		    "initialization failed: no memory for rx buffers\n");
		sk_stop(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Set interrupt moderation if changed via sysctl. */
	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
	if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
		    sc->sk_int_ticks));
		if (bootverbose)
			device_printf(sc_if->sk_if_dev,
			    "interrupt moderation is %d us.\n",
			    sc->sk_int_mod);
	}

	/* Configure interrupt handling (mask depends on which port we are) */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
#if 0
		/* XXX disable 100Mbps and full duplex mode? */
		reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
#endif
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Activate descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
	/* start transfer of Tx descriptors */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Mark the interface as up and ready to transmit. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Yukon chips use a periodic tick instead of MAC interrupts. */
	switch (sc->sk_type) {
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
		break;
	}

	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);

	return;
}
 3690 
 3691 static void
 3692 sk_stop(sc_if)
 3693         struct sk_if_softc      *sc_if;
 3694 {
 3695         int                     i;
 3696         struct sk_softc         *sc;
 3697         struct sk_txdesc        *txd;
 3698         struct sk_rxdesc        *rxd;
 3699         struct sk_rxdesc        *jrxd;
 3700         struct ifnet            *ifp;
 3701         u_int32_t               val;
 3702 
 3703         SK_IF_LOCK_ASSERT(sc_if);
 3704         sc = sc_if->sk_softc;
 3705         ifp = sc_if->sk_ifp;
 3706 
 3707         callout_stop(&sc_if->sk_tick_ch);
 3708         callout_stop(&sc_if->sk_watchdog_ch);
 3709 
 3710         /* stop Tx descriptor polling timer */
 3711         SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
 3712         /* stop transfer of Tx descriptors */
 3713         CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
 3714         for (i = 0; i < SK_TIMEOUT; i++) {
 3715                 val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
 3716                 if ((val & SK_TXBMU_TX_STOP) == 0)
 3717                         break;
 3718                 DELAY(1);
 3719         }
 3720         if (i == SK_TIMEOUT)
 3721                 device_printf(sc_if->sk_if_dev,
 3722                     "can not stop transfer of Tx descriptor\n");
 3723         /* stop transfer of Rx descriptors */
 3724         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
 3725         for (i = 0; i < SK_TIMEOUT; i++) {
 3726                 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
 3727                 if ((val & SK_RXBMU_RX_STOP) == 0)
 3728                         break;
 3729                 DELAY(1);
 3730         }
 3731         if (i == SK_TIMEOUT)
 3732                 device_printf(sc_if->sk_if_dev,
 3733                     "can not stop transfer of Rx descriptor\n");
 3734 
 3735         if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
 3736                 /* Put PHY back into reset. */
 3737                 val = sk_win_read_4(sc, SK_GPIO);
 3738                 if (sc_if->sk_port == SK_PORT_A) {
 3739                         val |= SK_GPIO_DIR0;
 3740                         val &= ~SK_GPIO_DAT0;
 3741                 } else {
 3742                         val |= SK_GPIO_DIR2;
 3743                         val &= ~SK_GPIO_DAT2;
 3744                 }
 3745                 sk_win_write_4(sc, SK_GPIO, val);
 3746         }
 3747 
 3748         /* Turn off various components of this interface. */
 3749         SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
 3750         switch (sc->sk_type) {
 3751         case SK_GENESIS:
 3752                 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
 3753                 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
 3754                 break;
 3755         case SK_YUKON:
 3756         case SK_YUKON_LITE:
 3757         case SK_YUKON_LP:
 3758                 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
 3759                 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
 3760                 break;
 3761         }
 3762         SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
 3763         SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
 3764         SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
 3765         SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
 3766         SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
 3767         SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
 3768         SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
 3769         SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
 3770         SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
 3771 
 3772         /* Disable interrupts */
 3773         if (sc_if->sk_port == SK_PORT_A)
 3774                 sc->sk_intrmask &= ~SK_INTRS1;
 3775         else
 3776                 sc->sk_intrmask &= ~SK_INTRS2;
 3777         CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
 3778 
 3779         SK_XM_READ_2(sc_if, XM_ISR);
 3780         SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
 3781 
 3782         /* Free RX and TX mbufs still in the queues. */
 3783         for (i = 0; i < SK_RX_RING_CNT; i++) {
 3784                 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
 3785                 if (rxd->rx_m != NULL) {
 3786                         bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
 3787                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 3788                         bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
 3789                             rxd->rx_dmamap);
 3790                         m_freem(rxd->rx_m);
 3791                         rxd->rx_m = NULL;
 3792                 }
 3793         }
 3794         for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
 3795                 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
 3796                 if (jrxd->rx_m != NULL) {
 3797                         bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
 3798                             jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 3799                         bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
 3800                             jrxd->rx_dmamap);
 3801                         m_freem(jrxd->rx_m);
 3802                         jrxd->rx_m = NULL;
 3803                 }
 3804         }
 3805         for (i = 0; i < SK_TX_RING_CNT; i++) {
 3806                 txd = &sc_if->sk_cdata.sk_txdesc[i];
 3807                 if (txd->tx_m != NULL) {
 3808                         bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
 3809                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 3810                         bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
 3811                             txd->tx_dmamap);
 3812                         m_freem(txd->tx_m);
 3813                         txd->tx_m = NULL;
 3814                 }
 3815         }
 3816 
 3817         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
 3818 
 3819         return;
 3820 }
 3821 
 3822 static int
 3823 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 3824 {
 3825         int error, value;
 3826 
 3827         if (!arg1)
 3828                 return (EINVAL);
 3829         value = *(int *)arg1;
 3830         error = sysctl_handle_int(oidp, &value, 0, req);
 3831         if (error || !req->newptr)
 3832                 return (error);
 3833         if (value < low || value > high)
 3834                 return (EINVAL);
 3835         *(int *)arg1 = value;
 3836         return (0);
 3837 }
 3838 
/*
 * Sysctl handler for the driver's interrupt moderation tunable:
 * delegates to sysctl_int_range(), constraining any new value to
 * [SK_IM_MIN, SK_IM_MAX].
 */
static int
sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
{
        return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
}

Cache object: 793e6ab9964d9f3aeb286a191817bf88


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.