The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/sk/if_sk.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
    2 
    3 /*-
    4  * SPDX-License-Identifier: BSD-4-Clause
    5  *
    6  * Copyright (c) 1997, 1998, 1999, 2000
    7  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed by Bill Paul.
   20  * 4. Neither the name of the author nor the names of any co-contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   34  * THE POSSIBILITY OF SUCH DAMAGE.
   35  */
   36 /*-
   37  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
   38  *
   39  * Permission to use, copy, modify, and distribute this software for any
   40  * purpose with or without fee is hereby granted, provided that the above
   41  * copyright notice and this permission notice appear in all copies.
   42  *
   43  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   44  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   45  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   46  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   47  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   48  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   49  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
   50  */
   51 
   52 #include <sys/cdefs.h>
   53 __FBSDID("$FreeBSD$");
   54 
   55 /*
   56  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
   57  * the SK-984x series adapters, both single port and dual port.
   58  * References:
   59  *      The XaQti XMAC II datasheet,
   60  *  https://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
   61  *      The SysKonnect GEnesis manual, http://www.syskonnect.com
   62  *
   63  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
   64  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
   65  * convenience to others until Vitesse corrects this problem:
   66  *
   67  * https://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
   68  *
   69  * Written by Bill Paul <wpaul@ee.columbia.edu>
   70  * Department of Electrical Engineering
   71  * Columbia University, New York City
   72  */
   73 /*
   74  * The SysKonnect gigabit ethernet adapters consist of two main
   75  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
   76  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
   77  * components and a PHY while the GEnesis controller provides a PCI
   78  * interface with DMA support. Each card may have between 512K and
   79  * 2MB of SRAM on board depending on the configuration.
   80  *
   81  * The SysKonnect GEnesis controller can have either one or two XMAC
   82  * chips connected to it, allowing single or dual port NIC configurations.
   83  * SysKonnect has the distinction of being the only vendor on the market
   84  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
   85  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
   86  * XMAC registers. This driver takes advantage of these features to allow
   87  * both XMACs to operate as independent interfaces.
   88  */
   89 
   90 #include <sys/param.h>
   91 #include <sys/systm.h>
   92 #include <sys/bus.h>
   93 #include <sys/endian.h>
   94 #include <sys/mbuf.h>
   95 #include <sys/malloc.h>
   96 #include <sys/kernel.h>
   97 #include <sys/module.h>
   98 #include <sys/socket.h>
   99 #include <sys/sockio.h>
  100 #include <sys/queue.h>
  101 #include <sys/sysctl.h>
  102 
  103 #include <net/bpf.h>
  104 #include <net/ethernet.h>
  105 #include <net/if.h>
  106 #include <net/if_var.h>
  107 #include <net/if_arp.h>
  108 #include <net/if_dl.h>
  109 #include <net/if_media.h>
  110 #include <net/if_types.h>
  111 #include <net/if_vlan_var.h>
  112 
  113 #include <netinet/in.h>
  114 #include <netinet/in_systm.h>
  115 #include <netinet/ip.h>
  116 
  117 #include <machine/bus.h>
  118 #include <machine/in_cksum.h>
  119 #include <machine/resource.h>
  120 #include <sys/rman.h>
  121 
  122 #include <dev/mii/mii.h>
  123 #include <dev/mii/miivar.h>
  124 #include <dev/mii/brgphyreg.h>
  125 
  126 #include <dev/pci/pcireg.h>
  127 #include <dev/pci/pcivar.h>
  128 
  129 #if 0
  130 #define SK_USEIOSPACE
  131 #endif
  132 
  133 #include <dev/sk/if_skreg.h>
  134 #include <dev/sk/xmaciireg.h>
  135 #include <dev/sk/yukonreg.h>
  136 
/* sk(4) requires PCI bus, Ethernet, and MII bus support (version 1 of each). */
MODULE_DEPEND(sk, pci, 1, 1, 1);
MODULE_DEPEND(sk, ether, 1, 1, 1);
MODULE_DEPEND(sk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
  143 
/*
 * Supported PCI vendor/device ID pairs and the description string
 * reported for each; presumably consulted by the probe routine.
 * The all-zero entry terminates the list.
 */
static const struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_BELKIN_5005,
		"Belkin F5D5005 Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T_A1,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T_B1,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};
  187 
  188 static int skc_probe(device_t);
  189 static int skc_attach(device_t);
  190 static int skc_detach(device_t);
  191 static int skc_shutdown(device_t);
  192 static int skc_suspend(device_t);
  193 static int skc_resume(device_t);
  194 static bus_dma_tag_t skc_get_dma_tag(device_t, device_t);
  195 static int sk_detach(device_t);
  196 static int sk_probe(device_t);
  197 static int sk_attach(device_t);
  198 static void sk_tick(void *);
  199 static void sk_yukon_tick(void *);
  200 static void sk_intr(void *);
  201 static void sk_intr_xmac(struct sk_if_softc *);
  202 static void sk_intr_bcom(struct sk_if_softc *);
  203 static void sk_intr_yukon(struct sk_if_softc *);
  204 static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
  205 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
  206 static void sk_rxeof(struct sk_if_softc *);
  207 static void sk_jumbo_rxeof(struct sk_if_softc *);
  208 static void sk_txeof(struct sk_if_softc *);
  209 static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
  210 static int sk_encap(struct sk_if_softc *, struct mbuf **);
  211 static void sk_start(struct ifnet *);
  212 static void sk_start_locked(struct ifnet *);
  213 static int sk_ioctl(struct ifnet *, u_long, caddr_t);
  214 static void sk_init(void *);
  215 static void sk_init_locked(struct sk_if_softc *);
  216 static void sk_init_xmac(struct sk_if_softc *);
  217 static void sk_init_yukon(struct sk_if_softc *);
  218 static void sk_stop(struct sk_if_softc *);
  219 static void sk_watchdog(void *);
  220 static int sk_ifmedia_upd(struct ifnet *);
  221 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  222 static void sk_reset(struct sk_softc *);
  223 static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
  224 static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
  225 static int sk_newbuf(struct sk_if_softc *, int);
  226 static int sk_jumbo_newbuf(struct sk_if_softc *, int);
  227 static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  228 static int sk_dma_alloc(struct sk_if_softc *);
  229 static int sk_dma_jumbo_alloc(struct sk_if_softc *);
  230 static void sk_dma_free(struct sk_if_softc *);
  231 static void sk_dma_jumbo_free(struct sk_if_softc *);
  232 static int sk_init_rx_ring(struct sk_if_softc *);
  233 static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
  234 static void sk_init_tx_ring(struct sk_if_softc *);
  235 static u_int32_t sk_win_read_4(struct sk_softc *, int);
  236 static u_int16_t sk_win_read_2(struct sk_softc *, int);
  237 static u_int8_t sk_win_read_1(struct sk_softc *, int);
  238 static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
  239 static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
  240 static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
  241 
  242 static int sk_miibus_readreg(device_t, int, int);
  243 static int sk_miibus_writereg(device_t, int, int, int);
  244 static void sk_miibus_statchg(device_t);
  245 
  246 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
  247 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
  248                                                 int);
  249 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
  250 
  251 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
  252 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
  253                                                 int);
  254 static void sk_marv_miibus_statchg(struct sk_if_softc *);
  255 
  256 static uint32_t sk_xmchash(const uint8_t *);
  257 static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
  258 static void sk_rxfilter(struct sk_if_softc *);
  259 static void sk_rxfilter_genesis(struct sk_if_softc *);
  260 static void sk_rxfilter_yukon(struct sk_if_softc *);
  261 
  262 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
  263 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
  264 
/* Tunables. */
/* Non-zero disables jumbo frame support; settable at boot via hw.skc.jumbo_disable. */
static int jumbo_disable = 0;
TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
  268 
/*
 * It seems that SK-NET GENESIS supports a very simple checksum offload
 * capability for Tx and I believe it can generate a 0 checksum value for
 * UDP packets in Tx as the hardware can't differentiate UDP packets from
 * TCP packets.  A 0 checksum value for a UDP packet is invalid, as it
 * means the sender didn't perform the checksum computation.  For safety
 * I disabled the UDP checksum offload capability for the moment.
 */
  277 #define SK_CSUM_FEATURES        (CSUM_TCP)
  278 
  279 /*
  280  * Note that we have newbus methods for both the GEnesis controller
  281  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
  282  * the miibus code is a child of the XMACs. We need to do it this way
  283  * so that the miibus drivers can access the PHY registers on the
  284  * right PHY. It's not quite what I had in mind, but it's the only
  285  * design that achieves the desired effect.
  286  */
/* Newbus method table for the GEnesis/Yukon controller ("skc"). */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_suspend,	skc_suspend),
	DEVMETHOD(device_resume,	skc_resume),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	DEVMETHOD(bus_get_dma_tag,	skc_get_dma_tag),

	DEVMETHOD_END
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

/* Newbus method table for each network interface child ("sk"). */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	DEVMETHOD_END
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

/*
 * Attach skc to pci, sk interfaces to skc, and miibus to sk, mirroring
 * the controller/MAC/PHY hierarchy described above.
 */
DRIVER_MODULE(skc, pci, skc_driver, NULL, NULL);
DRIVER_MODULE(sk, skc, sk_driver, NULL, NULL);
DRIVER_MODULE(miibus, sk, miibus_driver, NULL, NULL);
  331 
/*
 * Bus resources: an I/O-port BAR 1 variant (presumably used when
 * SK_USEIOSPACE is defined -- see the #if 0 above) or a memory BAR 0
 * variant, each plus one shareable interrupt line.
 */
static struct resource_spec sk_res_spec_io[] = {
	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec sk_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};
  343 
  344 #define SK_SETBIT(sc, reg, x)           \
  345         CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
  346 
  347 #define SK_CLRBIT(sc, reg, x)           \
  348         CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
  349 
  350 #define SK_WIN_SETBIT_4(sc, reg, x)     \
  351         sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
  352 
  353 #define SK_WIN_CLRBIT_4(sc, reg, x)     \
  354         sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
  355 
  356 #define SK_WIN_SETBIT_2(sc, reg, x)     \
  357         sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
  358 
  359 #define SK_WIN_CLRBIT_2(sc, reg, x)     \
  360         sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
  361 
  362 static u_int32_t
  363 sk_win_read_4(sc, reg)
  364         struct sk_softc         *sc;
  365         int                     reg;
  366 {
  367 #ifdef SK_USEIOSPACE
  368         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  369         return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
  370 #else
  371         return(CSR_READ_4(sc, reg));
  372 #endif
  373 }
  374 
  375 static u_int16_t
  376 sk_win_read_2(sc, reg)
  377         struct sk_softc         *sc;
  378         int                     reg;
  379 {
  380 #ifdef SK_USEIOSPACE
  381         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  382         return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
  383 #else
  384         return(CSR_READ_2(sc, reg));
  385 #endif
  386 }
  387 
  388 static u_int8_t
  389 sk_win_read_1(sc, reg)
  390         struct sk_softc         *sc;
  391         int                     reg;
  392 {
  393 #ifdef SK_USEIOSPACE
  394         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  395         return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
  396 #else
  397         return(CSR_READ_1(sc, reg));
  398 #endif
  399 }
  400 
  401 static void
  402 sk_win_write_4(sc, reg, val)
  403         struct sk_softc         *sc;
  404         int                     reg;
  405         u_int32_t               val;
  406 {
  407 #ifdef SK_USEIOSPACE
  408         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  409         CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
  410 #else
  411         CSR_WRITE_4(sc, reg, val);
  412 #endif
  413         return;
  414 }
  415 
  416 static void
  417 sk_win_write_2(sc, reg, val)
  418         struct sk_softc         *sc;
  419         int                     reg;
  420         u_int32_t               val;
  421 {
  422 #ifdef SK_USEIOSPACE
  423         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  424         CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
  425 #else
  426         CSR_WRITE_2(sc, reg, val);
  427 #endif
  428         return;
  429 }
  430 
  431 static void
  432 sk_win_write_1(sc, reg, val)
  433         struct sk_softc         *sc;
  434         int                     reg;
  435         u_int32_t               val;
  436 {
  437 #ifdef SK_USEIOSPACE
  438         CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
  439         CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
  440 #else
  441         CSR_WRITE_1(sc, reg, val);
  442 #endif
  443         return;
  444 }
  445 
  446 static int
  447 sk_miibus_readreg(dev, phy, reg)
  448         device_t                dev;
  449         int                     phy, reg;
  450 {
  451         struct sk_if_softc      *sc_if;
  452         int                     v;
  453 
  454         sc_if = device_get_softc(dev);
  455 
  456         SK_IF_MII_LOCK(sc_if);
  457         switch(sc_if->sk_softc->sk_type) {
  458         case SK_GENESIS:
  459                 v = sk_xmac_miibus_readreg(sc_if, phy, reg);
  460                 break;
  461         case SK_YUKON:
  462         case SK_YUKON_LITE:
  463         case SK_YUKON_LP:
  464                 v = sk_marv_miibus_readreg(sc_if, phy, reg);
  465                 break;
  466         default:
  467                 v = 0;
  468                 break;
  469         }
  470         SK_IF_MII_UNLOCK(sc_if);
  471 
  472         return (v);
  473 }
  474 
  475 static int
  476 sk_miibus_writereg(dev, phy, reg, val)
  477         device_t                dev;
  478         int                     phy, reg, val;
  479 {
  480         struct sk_if_softc      *sc_if;
  481         int                     v;
  482 
  483         sc_if = device_get_softc(dev);
  484 
  485         SK_IF_MII_LOCK(sc_if);
  486         switch(sc_if->sk_softc->sk_type) {
  487         case SK_GENESIS:
  488                 v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
  489                 break;
  490         case SK_YUKON:
  491         case SK_YUKON_LITE:
  492         case SK_YUKON_LP:
  493                 v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
  494                 break;
  495         default:
  496                 v = 0;
  497                 break;
  498         }
  499         SK_IF_MII_UNLOCK(sc_if);
  500 
  501         return (v);
  502 }
  503 
  504 static void
  505 sk_miibus_statchg(dev)
  506         device_t                dev;
  507 {
  508         struct sk_if_softc      *sc_if;
  509 
  510         sc_if = device_get_softc(dev);
  511 
  512         SK_IF_MII_LOCK(sc_if);
  513         switch(sc_if->sk_softc->sk_type) {
  514         case SK_GENESIS:
  515                 sk_xmac_miibus_statchg(sc_if);
  516                 break;
  517         case SK_YUKON:
  518         case SK_YUKON_LITE:
  519         case SK_YUKON_LP:
  520                 sk_marv_miibus_statchg(sc_if);
  521                 break;
  522         }
  523         SK_IF_MII_UNLOCK(sc_if);
  524 
  525         return;
  526 }
  527 
  528 static int
  529 sk_xmac_miibus_readreg(sc_if, phy, reg)
  530         struct sk_if_softc      *sc_if;
  531         int                     phy, reg;
  532 {
  533         int                     i;
  534 
  535         SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
  536         SK_XM_READ_2(sc_if, XM_PHY_DATA);
  537         if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
  538                 for (i = 0; i < SK_TIMEOUT; i++) {
  539                         DELAY(1);
  540                         if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
  541                             XM_MMUCMD_PHYDATARDY)
  542                                 break;
  543                 }
  544 
  545                 if (i == SK_TIMEOUT) {
  546                         if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
  547                         return(0);
  548                 }
  549         }
  550         DELAY(1);
  551         i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
  552 
  553         return(i);
  554 }
  555 
  556 static int
  557 sk_xmac_miibus_writereg(sc_if, phy, reg, val)
  558         struct sk_if_softc      *sc_if;
  559         int                     phy, reg, val;
  560 {
  561         int                     i;
  562 
  563         SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
  564         for (i = 0; i < SK_TIMEOUT; i++) {
  565                 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
  566                         break;
  567         }
  568 
  569         if (i == SK_TIMEOUT) {
  570                 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
  571                 return (ETIMEDOUT);
  572         }
  573 
  574         SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
  575         for (i = 0; i < SK_TIMEOUT; i++) {
  576                 DELAY(1);
  577                 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
  578                         break;
  579         }
  580         if (i == SK_TIMEOUT)
  581                 if_printf(sc_if->sk_ifp, "phy write timed out\n");
  582 
  583         return(0);
  584 }
  585 
  586 static void
  587 sk_xmac_miibus_statchg(sc_if)
  588         struct sk_if_softc      *sc_if;
  589 {
  590         struct mii_data         *mii;
  591 
  592         mii = device_get_softc(sc_if->sk_miibus);
  593 
  594         /*
  595          * If this is a GMII PHY, manually set the XMAC's
  596          * duplex mode accordingly.
  597          */
  598         if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
  599                 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
  600                         SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
  601                 } else {
  602                         SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
  603                 }
  604         }
  605 }
  606 
  607 static int
  608 sk_marv_miibus_readreg(sc_if, phy, reg)
  609         struct sk_if_softc      *sc_if;
  610         int                     phy, reg;
  611 {
  612         u_int16_t               val;
  613         int                     i;
  614 
  615         if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
  616             sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) {
  617                 return(0);
  618         }
  619 
  620         SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
  621                       YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
  622 
  623         for (i = 0; i < SK_TIMEOUT; i++) {
  624                 DELAY(1);
  625                 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
  626                 if (val & YU_SMICR_READ_VALID)
  627                         break;
  628         }
  629 
  630         if (i == SK_TIMEOUT) {
  631                 if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
  632                 return(0);
  633         }
  634 
  635         val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
  636 
  637         return(val);
  638 }
  639 
  640 static int
  641 sk_marv_miibus_writereg(sc_if, phy, reg, val)
  642         struct sk_if_softc      *sc_if;
  643         int                     phy, reg, val;
  644 {
  645         int                     i;
  646 
  647         SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
  648         SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
  649                       YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
  650 
  651         for (i = 0; i < SK_TIMEOUT; i++) {
  652                 DELAY(1);
  653                 if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
  654                         break;
  655         }
  656         if (i == SK_TIMEOUT)
  657                 if_printf(sc_if->sk_ifp, "phy write timeout\n");
  658 
  659         return(0);
  660 }
  661 
  662 static void
  663 sk_marv_miibus_statchg(sc_if)
  664         struct sk_if_softc      *sc_if;
  665 {
  666         return;
  667 }
  668 
  669 #define HASH_BITS               6
  670 
  671 static u_int32_t
  672 sk_xmchash(addr)
  673         const uint8_t *addr;
  674 {
  675         uint32_t crc;
  676 
  677         /* Compute CRC for the address value. */
  678         crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
  679 
  680         return (~crc & ((1 << HASH_BITS) - 1));
  681 }
  682 
  683 static void
  684 sk_setfilt(sc_if, addr, slot)
  685         struct sk_if_softc      *sc_if;
  686         u_int16_t               *addr;
  687         int                     slot;
  688 {
  689         int                     base;
  690 
  691         base = XM_RXFILT_ENTRY(slot);
  692 
  693         SK_XM_WRITE_2(sc_if, base, addr[0]);
  694         SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
  695         SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
  696 
  697         return;
  698 }
  699 
  700 static void
  701 sk_rxfilter(sc_if)
  702         struct sk_if_softc      *sc_if;
  703 {
  704         struct sk_softc         *sc;
  705 
  706         SK_IF_LOCK_ASSERT(sc_if);
  707 
  708         sc = sc_if->sk_softc;
  709         if (sc->sk_type == SK_GENESIS)
  710                 sk_rxfilter_genesis(sc_if);
  711         else
  712                 sk_rxfilter_yukon(sc_if);
  713 }
  714 
/*
 * Accumulator passed to sk_add_maddr_genesis() while walking the
 * interface's multicast address list.
 */
struct sk_add_maddr_genesis_ctx {
	struct sk_if_softc *sc_if;	/* interface being programmed */
	uint32_t hashes[2];		/* 64-bit hash-filter bitmap (MAR0/MAR2) */
	uint32_t mode;			/* XM_MODE bits accumulated so far */
};
  720 
/*
 * if_foreach_llmaddr() callback: place the cnt'th multicast address
 * into a perfect-filter slot while slots remain, otherwise fold it
 * into the 64-bit hash filter.
 */
static u_int
sk_add_maddr_genesis(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct sk_add_maddr_genesis_ctx *ctx = arg;
	int h;

	/*
	 * Program the first XM_RXFILT_MAX multicast groups
	 * into the perfect filter.
	 */
	if (cnt + 1 < XM_RXFILT_MAX) {
		/* cnt + 1: slot 0 presumably holds the station address
		 * (sk_rxfilter_genesis() never clears it) -- confirm. */
		sk_setfilt(ctx->sc_if, (uint16_t *)LLADDR(sdl), cnt + 1);
		ctx->mode |= XM_MODE_RX_USE_PERFECT;
		return (1);
	}
	/* Perfect-filter slots exhausted; fall back to the hash filter. */
	h = sk_xmchash((const uint8_t *)LLADDR(sdl));
	if (h < 32)
		ctx->hashes[0] |= (1 << h);
	else
		ctx->hashes[1] |= (1 << (h - 32));
	ctx->mode |= XM_MODE_RX_USE_HASH;

	/* Non-zero keeps if_foreach_llmaddr() iterating. */
	return (1);
}
  745 
/*
 * Program the GEnesis/XMAC receive filter: perfect-filter slots plus
 * a 64-bit hash table in MAR0/MAR2.  Slot 0 is skipped when clearing,
 * presumably because it holds the station address -- confirm against
 * sk_init_xmac().  The interface lock must be held.
 */
static void
sk_rxfilter_genesis(struct sk_if_softc *sc_if)
{
	struct ifnet		*ifp = sc_if->sk_ifp;
	struct sk_add_maddr_genesis_ctx ctx = { sc_if, { 0, 0 } };
	int			i;
	u_int16_t		dummy[] = { 0, 0, 0 };

	SK_IF_LOCK_ASSERT(sc_if);

	/* Preserve XM_MODE, clearing only the filter-related bits. */
	ctx.mode = SK_XM_READ_4(sc_if, XM_MODE);
	ctx.mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
	    XM_MODE_RX_USE_PERFECT);
	/* First, zot all the existing perfect filters. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, dummy, i);

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept everything: open the hash filter completely. */
		if (ifp->if_flags & IFF_ALLMULTI)
			ctx.mode |= XM_MODE_RX_USE_HASH;
		if (ifp->if_flags & IFF_PROMISC)
			ctx.mode |= XM_MODE_RX_PROMISC;
		ctx.hashes[0] = 0xFFFFFFFF;
		ctx.hashes[1] = 0xFFFFFFFF;
	} else
		/* XXX want to maintain reverse semantics */
		if_foreach_llmaddr(ifp, sk_add_maddr_genesis, &ctx);

	SK_XM_WRITE_4(sc_if, XM_MODE, ctx.mode);
	SK_XM_WRITE_4(sc_if, XM_MAR0, ctx.hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, ctx.hashes[1]);
}
  779 
  780 static u_int
  781 sk_hash_maddr_yukon(void *arg, struct sockaddr_dl *sdl, u_int cnt)
  782 {
  783         uint32_t crc, *hashes = arg;
  784 
  785         crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
  786         /* Just want the 6 least significant bits. */
  787         crc &= 0x3f;
  788         /* Set the corresponding bit in the hash table. */
  789         hashes[crc >> 5] |= 1 << (crc & 0x1f);
  790 
  791         return (1);
  792 }
  793 
/*
 * Program the Yukon receive filter via the Receive Control Register
 * and the four 16-bit multicast hash registers MCAH1..MCAH4.  The
 * interface lock must be held.
 */
static void
sk_rxfilter_yukon(struct sk_if_softc *sc_if)
{
	struct ifnet		*ifp;
	uint32_t		hashes[2] = { 0, 0 }, mode;

	SK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->sk_ifp;
	mode = SK_YU_READ_2(sc_if, YUKON_RCR);
	if (ifp->if_flags & IFF_PROMISC)
		/* Promiscuous: disable unicast and multicast filtering. */
		mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		/* All-multicast: keep filtering on, open the hash fully. */
		mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		mode |= YU_RCR_UFLEN;
		if_foreach_llmaddr(ifp, sk_hash_maddr_yukon, hashes);
		/* Enable multicast hash filtering only if any bit is set. */
		if (hashes[0] != 0 || hashes[1] != 0)
			mode |= YU_RCR_MUFLEN;
	}

	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_RCR, mode);
}
  823 
  824 static int
  825 sk_init_rx_ring(sc_if)
  826         struct sk_if_softc      *sc_if;
  827 {
  828         struct sk_ring_data     *rd;
  829         bus_addr_t              addr;
  830         u_int32_t               csum_start;
  831         int                     i;
  832 
  833         sc_if->sk_cdata.sk_rx_cons = 0;
  834 
  835         csum_start = (ETHER_HDR_LEN + sizeof(struct ip))  << 16 |
  836             ETHER_HDR_LEN;
  837         rd = &sc_if->sk_rdata;
  838         bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
  839         for (i = 0; i < SK_RX_RING_CNT; i++) {
  840                 if (sk_newbuf(sc_if, i) != 0)
  841                         return (ENOBUFS);
  842                 if (i == (SK_RX_RING_CNT - 1))
  843                         addr = SK_RX_RING_ADDR(sc_if, 0);
  844                 else
  845                         addr = SK_RX_RING_ADDR(sc_if, i + 1);
  846                 rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
  847                 rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
  848         }
  849 
  850         bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
  851             sc_if->sk_cdata.sk_rx_ring_map,
  852             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  853 
  854         return(0);
  855 }
  856 
  857 static int
  858 sk_init_jumbo_rx_ring(sc_if)
  859         struct sk_if_softc      *sc_if;
  860 {
  861         struct sk_ring_data     *rd;
  862         bus_addr_t              addr;
  863         u_int32_t               csum_start;
  864         int                     i;
  865 
  866         sc_if->sk_cdata.sk_jumbo_rx_cons = 0;
  867 
  868         csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
  869             ETHER_HDR_LEN;
  870         rd = &sc_if->sk_rdata;
  871         bzero(rd->sk_jumbo_rx_ring,
  872             sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
  873         for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
  874                 if (sk_jumbo_newbuf(sc_if, i) != 0)
  875                         return (ENOBUFS);
  876                 if (i == (SK_JUMBO_RX_RING_CNT - 1))
  877                         addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
  878                 else
  879                         addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
  880                 rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
  881                 rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
  882         }
  883 
  884         bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
  885             sc_if->sk_cdata.sk_jumbo_rx_ring_map,
  886             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  887 
  888         return (0);
  889 }
  890 
  891 static void
  892 sk_init_tx_ring(sc_if)
  893         struct sk_if_softc      *sc_if;
  894 {
  895         struct sk_ring_data     *rd;
  896         struct sk_txdesc        *txd;
  897         bus_addr_t              addr;
  898         int                     i;
  899 
  900         STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
  901         STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
  902 
  903         sc_if->sk_cdata.sk_tx_prod = 0;
  904         sc_if->sk_cdata.sk_tx_cons = 0;
  905         sc_if->sk_cdata.sk_tx_cnt = 0;
  906 
  907         rd = &sc_if->sk_rdata;
  908         bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
  909         for (i = 0; i < SK_TX_RING_CNT; i++) {
  910                 if (i == (SK_TX_RING_CNT - 1))
  911                         addr = SK_TX_RING_ADDR(sc_if, 0);
  912                 else
  913                         addr = SK_TX_RING_ADDR(sc_if, i + 1);
  914                 rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
  915                 txd = &sc_if->sk_cdata.sk_txdesc[i];
  916                 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
  917         }
  918 
  919         bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
  920             sc_if->sk_cdata.sk_tx_ring_map,
  921             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  922 }
  923 
  924 static __inline void
  925 sk_discard_rxbuf(sc_if, idx)
  926         struct sk_if_softc      *sc_if;
  927         int                     idx;
  928 {
  929         struct sk_rx_desc       *r;
  930         struct sk_rxdesc        *rxd;
  931         struct mbuf             *m;
  932 
  933         r = &sc_if->sk_rdata.sk_rx_ring[idx];
  934         rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
  935         m = rxd->rx_m;
  936         r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
  937 }
  938 
  939 static __inline void
  940 sk_discard_jumbo_rxbuf(sc_if, idx)
  941         struct sk_if_softc      *sc_if;
  942         int                     idx;
  943 {
  944         struct sk_rx_desc       *r;
  945         struct sk_rxdesc        *rxd;
  946         struct mbuf             *m;
  947 
  948         r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
  949         rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
  950         m = rxd->rx_m;
  951         r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
  952 }
  953 
  954 static int
  955 sk_newbuf(sc_if, idx)
  956         struct sk_if_softc      *sc_if;
  957         int                     idx;
  958 {
  959         struct sk_rx_desc       *r;
  960         struct sk_rxdesc        *rxd;
  961         struct mbuf             *m;
  962         bus_dma_segment_t       segs[1];
  963         bus_dmamap_t            map;
  964         int                     nsegs;
  965 
  966         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
  967         if (m == NULL)
  968                 return (ENOBUFS);
  969         m->m_len = m->m_pkthdr.len = MCLBYTES;
  970         m_adj(m, ETHER_ALIGN);
  971 
  972         if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
  973             sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
  974                 m_freem(m);
  975                 return (ENOBUFS);
  976         }
  977         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
  978 
  979         rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
  980         if (rxd->rx_m != NULL) {
  981                 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
  982                     BUS_DMASYNC_POSTREAD);
  983                 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
  984         }
  985         map = rxd->rx_dmamap;
  986         rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
  987         sc_if->sk_cdata.sk_rx_sparemap = map;
  988         bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
  989             BUS_DMASYNC_PREREAD);
  990         rxd->rx_m = m;
  991         r = &sc_if->sk_rdata.sk_rx_ring[idx];
  992         r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
  993         r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
  994         r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
  995 
  996         return (0);
  997 }
  998 
  999 static int
 1000 sk_jumbo_newbuf(sc_if, idx)
 1001         struct sk_if_softc      *sc_if;
 1002         int                     idx;
 1003 {
 1004         struct sk_rx_desc       *r;
 1005         struct sk_rxdesc        *rxd;
 1006         struct mbuf             *m;
 1007         bus_dma_segment_t       segs[1];
 1008         bus_dmamap_t            map;
 1009         int                     nsegs;
 1010 
 1011         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
 1012         if (m == NULL)
 1013                 return (ENOBUFS);
 1014         m->m_pkthdr.len = m->m_len = MJUM9BYTES;
 1015         /*
 1016          * Adjust alignment so packet payload begins on a
 1017          * longword boundary. Mandatory for Alpha, useful on
 1018          * x86 too.
 1019          */
 1020         m_adj(m, ETHER_ALIGN);
 1021 
 1022         if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
 1023             sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
 1024                 m_freem(m);
 1025                 return (ENOBUFS);
 1026         }
 1027         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 1028 
 1029         rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
 1030         if (rxd->rx_m != NULL) {
 1031                 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
 1032                     BUS_DMASYNC_POSTREAD);
 1033                 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
 1034                     rxd->rx_dmamap);
 1035         }
 1036         map = rxd->rx_dmamap;
 1037         rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
 1038         sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
 1039         bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
 1040             BUS_DMASYNC_PREREAD);
 1041         rxd->rx_m = m;
 1042         r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
 1043         r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
 1044         r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
 1045         r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
 1046 
 1047         return (0);
 1048 }
 1049 
 1050 /*
 1051  * Set media options.
 1052  */
 1053 static int
 1054 sk_ifmedia_upd(ifp)
 1055         struct ifnet            *ifp;
 1056 {
 1057         struct sk_if_softc      *sc_if = ifp->if_softc;
 1058         struct mii_data         *mii;
 1059 
 1060         mii = device_get_softc(sc_if->sk_miibus);
 1061         sk_init(sc_if);
 1062         mii_mediachg(mii);
 1063 
 1064         return(0);
 1065 }
 1066 
 1067 /*
 1068  * Report current media status.
 1069  */
 1070 static void
 1071 sk_ifmedia_sts(ifp, ifmr)
 1072         struct ifnet            *ifp;
 1073         struct ifmediareq       *ifmr;
 1074 {
 1075         struct sk_if_softc      *sc_if;
 1076         struct mii_data         *mii;
 1077 
 1078         sc_if = ifp->if_softc;
 1079         mii = device_get_softc(sc_if->sk_miibus);
 1080 
 1081         mii_pollstat(mii);
 1082         ifmr->ifm_active = mii->mii_media_active;
 1083         ifmr->ifm_status = mii->mii_media_status;
 1084 
 1085         return;
 1086 }
 1087 
 1088 static int
 1089 sk_ioctl(ifp, command, data)
 1090         struct ifnet            *ifp;
 1091         u_long                  command;
 1092         caddr_t                 data;
 1093 {
 1094         struct sk_if_softc      *sc_if = ifp->if_softc;
 1095         struct ifreq            *ifr = (struct ifreq *) data;
 1096         int                     error, mask;
 1097         struct mii_data         *mii;
 1098 
 1099         error = 0;
 1100         switch(command) {
 1101         case SIOCSIFMTU:
 1102                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
 1103                         error = EINVAL;
 1104                 else if (ifp->if_mtu != ifr->ifr_mtu) {
 1105                         if (sc_if->sk_jumbo_disable != 0 &&
 1106                             ifr->ifr_mtu > SK_MAX_FRAMELEN)
 1107                                 error = EINVAL;
 1108                         else {
 1109                                 SK_IF_LOCK(sc_if);
 1110                                 ifp->if_mtu = ifr->ifr_mtu;
 1111                                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1112                                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1113                                         sk_init_locked(sc_if);
 1114                                 }
 1115                                 SK_IF_UNLOCK(sc_if);
 1116                         }
 1117                 }
 1118                 break;
 1119         case SIOCSIFFLAGS:
 1120                 SK_IF_LOCK(sc_if);
 1121                 if (ifp->if_flags & IFF_UP) {
 1122                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1123                                 if ((ifp->if_flags ^ sc_if->sk_if_flags)
 1124                                     & (IFF_PROMISC | IFF_ALLMULTI))
 1125                                         sk_rxfilter(sc_if);
 1126                         } else
 1127                                 sk_init_locked(sc_if);
 1128                 } else {
 1129                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1130                                 sk_stop(sc_if);
 1131                 }
 1132                 sc_if->sk_if_flags = ifp->if_flags;
 1133                 SK_IF_UNLOCK(sc_if);
 1134                 break;
 1135         case SIOCADDMULTI:
 1136         case SIOCDELMULTI:
 1137                 SK_IF_LOCK(sc_if);
 1138                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1139                         sk_rxfilter(sc_if);
 1140                 SK_IF_UNLOCK(sc_if);
 1141                 break;
 1142         case SIOCGIFMEDIA:
 1143         case SIOCSIFMEDIA:
 1144                 mii = device_get_softc(sc_if->sk_miibus);
 1145                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
 1146                 break;
 1147         case SIOCSIFCAP:
 1148                 SK_IF_LOCK(sc_if);
 1149                 if (sc_if->sk_softc->sk_type == SK_GENESIS) {
 1150                         SK_IF_UNLOCK(sc_if);
 1151                         break;
 1152                 }
 1153                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 1154                 if ((mask & IFCAP_TXCSUM) != 0 &&
 1155                     (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
 1156                         ifp->if_capenable ^= IFCAP_TXCSUM;
 1157                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1158                                 ifp->if_hwassist |= SK_CSUM_FEATURES;
 1159                         else
 1160                                 ifp->if_hwassist &= ~SK_CSUM_FEATURES;
 1161                 }
 1162                 if ((mask & IFCAP_RXCSUM) != 0 &&
 1163                     (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 
 1164                         ifp->if_capenable ^= IFCAP_RXCSUM;
 1165                 SK_IF_UNLOCK(sc_if);
 1166                 break;
 1167         default:
 1168                 error = ether_ioctl(ifp, command, data);
 1169                 break;
 1170         }
 1171 
 1172         return (error);
 1173 }
 1174 
 1175 /*
 1176  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 1177  * IDs against our list and return a device name if we find a match.
 1178  */
 1179 static int
 1180 skc_probe(dev)
 1181         device_t                dev;
 1182 {
 1183         const struct sk_type    *t = sk_devs;
 1184 
 1185         while(t->sk_name != NULL) {
 1186                 if ((pci_get_vendor(dev) == t->sk_vid) &&
 1187                     (pci_get_device(dev) == t->sk_did)) {
 1188                         /*
 1189                          * Only attach to rev. 2 of the Linksys EG1032 adapter.
 1190                          * Rev. 3 is supported by re(4).
 1191                          */
 1192                         if ((t->sk_vid == VENDORID_LINKSYS) &&
 1193                                 (t->sk_did == DEVICEID_LINKSYS_EG1032) &&
 1194                                 (pci_get_subdevice(dev) !=
 1195                                  SUBDEVICEID_LINKSYS_EG1032_REV2)) {
 1196                                 t++;
 1197                                 continue;
 1198                         }
 1199                         device_set_desc(dev, t->sk_name);
 1200                         return (BUS_PROBE_DEFAULT);
 1201                 }
 1202                 t++;
 1203         }
 1204 
 1205         return(ENXIO);
 1206 }
 1207 
 1208 /*
 1209  * Force the GEnesis into reset, then bring it out of reset.
 1210  */
 1211 static void
 1212 sk_reset(sc)
 1213         struct sk_softc         *sc;
 1214 {
 1215 
 1216         CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
 1217         CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
 1218         if (SK_YUKON_FAMILY(sc->sk_type))
 1219                 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
 1220 
 1221         DELAY(1000);
 1222         CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
 1223         DELAY(2);
 1224         CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
 1225         if (SK_YUKON_FAMILY(sc->sk_type))
 1226                 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
 1227 
 1228         if (sc->sk_type == SK_GENESIS) {
 1229                 /* Configure packet arbiter */
 1230                 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
 1231                 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
 1232                 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
 1233                 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
 1234                 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
 1235         }
 1236 
 1237         /* Enable RAM interface */
 1238         sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
 1239 
 1240         /*
 1241          * Configure interrupt moderation. The moderation timer
 1242          * defers interrupts specified in the interrupt moderation
 1243          * timer mask based on the timeout specified in the interrupt
 1244          * moderation timer init register. Each bit in the timer
 1245          * register represents one tick, so to specify a timeout in
 1246          * microseconds, we have to multiply by the correct number of
 1247          * ticks-per-microsecond.
 1248          */
 1249         switch (sc->sk_type) {
 1250         case SK_GENESIS:
 1251                 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
 1252                 break;
 1253         default:
 1254                 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
 1255                 break;
 1256         }
 1257         if (bootverbose)
 1258                 device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
 1259                     sc->sk_int_mod);
 1260         sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
 1261             sc->sk_int_ticks));
 1262         sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
 1263             SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
 1264         sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
 1265 
 1266         return;
 1267 }
 1268 
 1269 static int
 1270 sk_probe(dev)
 1271         device_t                dev;
 1272 {
 1273         struct sk_softc         *sc;
 1274 
 1275         sc = device_get_softc(device_get_parent(dev));
 1276 
 1277         /*
 1278          * Not much to do here. We always know there will be
 1279          * at least one XMAC present, and if there are two,
 1280          * skc_attach() will create a second device instance
 1281          * for us.
 1282          */
 1283         switch (sc->sk_type) {
 1284         case SK_GENESIS:
 1285                 device_set_desc(dev, "XaQti Corp. XMAC II");
 1286                 break;
 1287         case SK_YUKON:
 1288         case SK_YUKON_LITE:
 1289         case SK_YUKON_LP:
 1290                 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
 1291                 break;
 1292         }
 1293 
 1294         return (BUS_PROBE_DEFAULT);
 1295 }
 1296 
 1297 /*
 1298  * Each XMAC chip is attached as a separate logical IP interface.
 1299  * Single port cards will have only one logical interface of course.
 1300  */
 1301 static int
 1302 sk_attach(dev)
 1303         device_t                dev;
 1304 {
 1305         struct sk_softc         *sc;
 1306         struct sk_if_softc      *sc_if;
 1307         struct ifnet            *ifp;
 1308         u_int32_t               r;
 1309         int                     error, i, phy, port;
 1310         u_char                  eaddr[6];
 1311         u_char                  inv_mac[] = {0, 0, 0, 0, 0, 0};
 1312 
 1313         if (dev == NULL)
 1314                 return(EINVAL);
 1315 
 1316         error = 0;
 1317         sc_if = device_get_softc(dev);
 1318         sc = device_get_softc(device_get_parent(dev));
 1319         port = *(int *)device_get_ivars(dev);
 1320 
 1321         sc_if->sk_if_dev = dev;
 1322         sc_if->sk_port = port;
 1323         sc_if->sk_softc = sc;
 1324         sc->sk_if[port] = sc_if;
 1325         if (port == SK_PORT_A)
 1326                 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
 1327         if (port == SK_PORT_B)
 1328                 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
 1329 
 1330         callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
 1331         callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);
 1332 
 1333         if (sk_dma_alloc(sc_if) != 0) {
 1334                 error = ENOMEM;
 1335                 goto fail;
 1336         }
 1337         sk_dma_jumbo_alloc(sc_if);
 1338 
 1339         ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
 1340         if (ifp == NULL) {
 1341                 device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
 1342                 error = ENOSPC;
 1343                 goto fail;
 1344         }
 1345         ifp->if_softc = sc_if;
 1346         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1347         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1348         /*
 1349          * SK_GENESIS has a bug in checksum offload - From linux.
 1350          */
 1351         if (sc_if->sk_softc->sk_type != SK_GENESIS) {
 1352                 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
 1353                 ifp->if_hwassist = 0;
 1354         } else {
 1355                 ifp->if_capabilities = 0;
 1356                 ifp->if_hwassist = 0;
 1357         }
 1358         ifp->if_capenable = ifp->if_capabilities;
 1359         /*
 1360          * Some revision of Yukon controller generates corrupted
 1361          * frame when TX checksum offloading is enabled.  The
 1362          * frame has a valid checksum value so payload might be
 1363          * modified during TX checksum calculation. Disable TX
 1364          * checksum offloading but give users chance to enable it
 1365          * when they know their controller works without problems
 1366          * with TX checksum offloading.
 1367          */
 1368         ifp->if_capenable &= ~IFCAP_TXCSUM;
 1369         ifp->if_ioctl = sk_ioctl;
 1370         ifp->if_start = sk_start;
 1371         ifp->if_init = sk_init;
 1372         IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
 1373         ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
 1374         IFQ_SET_READY(&ifp->if_snd);
 1375 
 1376         /*
 1377          * Get station address for this interface. Note that
 1378          * dual port cards actually come with three station
 1379          * addresses: one for each port, plus an extra. The
 1380          * extra one is used by the SysKonnect driver software
 1381          * as a 'virtual' station address for when both ports
 1382          * are operating in failover mode. Currently we don't
 1383          * use this extra address.
 1384          */
 1385         SK_IF_LOCK(sc_if);
 1386         for (i = 0; i < ETHER_ADDR_LEN; i++)
 1387                 eaddr[i] =
 1388                     sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
 1389 
 1390         /* Verify whether the station address is invalid or not. */
 1391         if (bcmp(eaddr, inv_mac, sizeof(inv_mac)) == 0) {
 1392                 device_printf(sc_if->sk_if_dev,
 1393                     "Generating random ethernet address\n");
 1394                 r = arc4random();
 1395                 /*
 1396                  * Set OUI to convenient locally assigned address.  'b'
 1397                  * is 0x62, which has the locally assigned bit set, and
 1398                  * the broadcast/multicast bit clear.
 1399                  */
 1400                 eaddr[0] = 'b';
 1401                 eaddr[1] = 's';
 1402                 eaddr[2] = 'd';
 1403                 eaddr[3] = (r >> 16) & 0xff;
 1404                 eaddr[4] = (r >>  8) & 0xff;
 1405                 eaddr[5] = (r >>  0) & 0xff;
 1406         }
 1407         /*
 1408          * Set up RAM buffer addresses. The NIC will have a certain
 1409          * amount of SRAM on it, somewhere between 512K and 2MB. We
 1410          * need to divide this up a) between the transmitter and
 1411          * receiver and b) between the two XMACs, if this is a
 1412          * dual port NIC. Our algotithm is to divide up the memory
 1413          * evenly so that everyone gets a fair share.
 1414          *
 1415          * Just to be contrary, Yukon2 appears to have separate memory
 1416          * for each MAC.
 1417          */
 1418         if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
 1419                 u_int32_t               chunk, val;
 1420 
 1421                 chunk = sc->sk_ramsize / 2;
 1422                 val = sc->sk_rboff / sizeof(u_int64_t);
 1423                 sc_if->sk_rx_ramstart = val;
 1424                 val += (chunk / sizeof(u_int64_t));
 1425                 sc_if->sk_rx_ramend = val - 1;
 1426                 sc_if->sk_tx_ramstart = val;
 1427                 val += (chunk / sizeof(u_int64_t));
 1428                 sc_if->sk_tx_ramend = val - 1;
 1429         } else {
 1430                 u_int32_t               chunk, val;
 1431 
 1432                 chunk = sc->sk_ramsize / 4;
 1433                 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
 1434                     sizeof(u_int64_t);
 1435                 sc_if->sk_rx_ramstart = val;
 1436                 val += (chunk / sizeof(u_int64_t));
 1437                 sc_if->sk_rx_ramend = val - 1;
 1438                 sc_if->sk_tx_ramstart = val;
 1439                 val += (chunk / sizeof(u_int64_t));
 1440                 sc_if->sk_tx_ramend = val - 1;
 1441         }
 1442 
 1443         /* Read and save PHY type and set PHY address */
 1444         sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
 1445         if (!SK_YUKON_FAMILY(sc->sk_type)) {
 1446                 switch(sc_if->sk_phytype) {
 1447                 case SK_PHYTYPE_XMAC:
 1448                         sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
 1449                         break;
 1450                 case SK_PHYTYPE_BCOM:
 1451                         sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
 1452                         break;
 1453                 default:
 1454                         device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
 1455                             sc_if->sk_phytype);
 1456                         error = ENODEV;
 1457                         SK_IF_UNLOCK(sc_if);
 1458                         goto fail;
 1459                 }
 1460         } else {
 1461                 if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
 1462                     sc->sk_pmd != 'S') {
 1463                         /* not initialized, punt */
 1464                         sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
 1465                         sc->sk_coppertype = 1;
 1466                 }
 1467 
 1468                 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
 1469 
 1470                 if (!(sc->sk_coppertype))
 1471                         sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
 1472         }
 1473 
 1474         /*
 1475          * Call MI attach routine.  Can't hold locks when calling into ether_*.
 1476          */
 1477         SK_IF_UNLOCK(sc_if);
 1478         ether_ifattach(ifp, eaddr);
 1479         SK_IF_LOCK(sc_if);
 1480 
 1481         /*
 1482          * The hardware should be ready for VLAN_MTU by default:
 1483          * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
 1484          * YU_SMR_MFL_VLAN is set by this driver in Yukon.
 1485          *
 1486          */
 1487         ifp->if_capabilities |= IFCAP_VLAN_MTU;
 1488         ifp->if_capenable |= IFCAP_VLAN_MTU;
 1489         /*
 1490          * Tell the upper layer(s) we support long frames.
 1491          * Must appear after the call to ether_ifattach() because
 1492          * ether_ifattach() sets ifi_hdrlen to the default value.
 1493          */
 1494         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
 1495 
 1496         /*
 1497          * Do miibus setup.
 1498          */
 1499         phy = MII_PHY_ANY;
 1500         switch (sc->sk_type) {
 1501         case SK_GENESIS:
 1502                 sk_init_xmac(sc_if);
 1503                 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
 1504                         phy = 0;
 1505                 break;
 1506         case SK_YUKON:
 1507         case SK_YUKON_LITE:
 1508         case SK_YUKON_LP:
 1509                 sk_init_yukon(sc_if);
 1510                 phy = 0;
 1511                 break;
 1512         }
 1513 
 1514         SK_IF_UNLOCK(sc_if);
 1515         error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd,
 1516             sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
 1517         if (error != 0) {
 1518                 device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n");
 1519                 ether_ifdetach(ifp);
 1520                 goto fail;
 1521         }
 1522 
 1523 fail:
 1524         if (error) {
 1525                 /* Access should be ok even though lock has been dropped */
 1526                 sc->sk_if[port] = NULL;
 1527                 sk_detach(dev);
 1528         }
 1529 
 1530         return(error);
 1531 }
 1532 
 1533 /*
 1534  * Attach the interface. Allocate softc structures, do ifmedia
 1535  * setup and ethernet/BPF attach.
 1536  */
 1537 static int
 1538 skc_attach(dev)
 1539         device_t                dev;
 1540 {
 1541         struct sk_softc         *sc;
 1542         int                     error = 0, *port;
 1543         uint8_t                 skrs;
 1544         const char              *pname = NULL;
 1545         char                    *revstr;
 1546 
 1547         sc = device_get_softc(dev);
 1548         sc->sk_dev = dev;
 1549 
 1550         mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1551             MTX_DEF);
 1552         mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
 1553         /*
 1554          * Map control/status registers.
 1555          */
 1556         pci_enable_busmaster(dev);
 1557 
 1558         /* Allocate resources */
 1559 #ifdef SK_USEIOSPACE
 1560         sc->sk_res_spec = sk_res_spec_io;
 1561 #else
 1562         sc->sk_res_spec = sk_res_spec_mem;
 1563 #endif
 1564         error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
 1565         if (error) {
 1566                 if (sc->sk_res_spec == sk_res_spec_mem)
 1567                         sc->sk_res_spec = sk_res_spec_io;
 1568                 else
 1569                         sc->sk_res_spec = sk_res_spec_mem;
 1570                 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
 1571                 if (error) {
 1572                         device_printf(dev, "couldn't allocate %s resources\n",
 1573                             sc->sk_res_spec == sk_res_spec_mem ? "memory" :
 1574                             "I/O");
 1575                         goto fail;
 1576                 }
 1577         }
 1578 
 1579         sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
 1580         sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
 1581 
 1582         /* Bail out if chip is not recognized. */
 1583         if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
 1584                 device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
 1585                     sc->sk_type, sc->sk_rev);
 1586                 error = ENXIO;
 1587                 goto fail;
 1588         }
 1589 
 1590         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 1591                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1592                 OID_AUTO, "int_mod",
 1593                 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
 1594                 &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
 1595                 "SK interrupt moderation");
 1596 
 1597         /* Pull in device tunables. */
 1598         sc->sk_int_mod = SK_IM_DEFAULT;
 1599         error = resource_int_value(device_get_name(dev), device_get_unit(dev),
 1600                 "int_mod", &sc->sk_int_mod);
 1601         if (error == 0) {
 1602                 if (sc->sk_int_mod < SK_IM_MIN ||
 1603                     sc->sk_int_mod > SK_IM_MAX) {
 1604                         device_printf(dev, "int_mod value out of range; "
 1605                             "using default: %d\n", SK_IM_DEFAULT);
 1606                         sc->sk_int_mod = SK_IM_DEFAULT;
 1607                 }
 1608         }
 1609 
 1610         /* Reset the adapter. */
 1611         sk_reset(sc);
 1612 
 1613         skrs = sk_win_read_1(sc, SK_EPROM0);
 1614         if (sc->sk_type == SK_GENESIS) {
 1615                 /* Read and save RAM size and RAMbuffer offset */
 1616                 switch(skrs) {
 1617                 case SK_RAMSIZE_512K_64:
 1618                         sc->sk_ramsize = 0x80000;
 1619                         sc->sk_rboff = SK_RBOFF_0;
 1620                         break;
 1621                 case SK_RAMSIZE_1024K_64:
 1622                         sc->sk_ramsize = 0x100000;
 1623                         sc->sk_rboff = SK_RBOFF_80000;
 1624                         break;
 1625                 case SK_RAMSIZE_1024K_128:
 1626                         sc->sk_ramsize = 0x100000;
 1627                         sc->sk_rboff = SK_RBOFF_0;
 1628                         break;
 1629                 case SK_RAMSIZE_2048K_128:
 1630                         sc->sk_ramsize = 0x200000;
 1631                         sc->sk_rboff = SK_RBOFF_0;
 1632                         break;
 1633                 default:
 1634                         device_printf(dev, "unknown ram size: %d\n", skrs);
 1635                         error = ENXIO;
 1636                         goto fail;
 1637                 }
 1638         } else { /* SK_YUKON_FAMILY */
 1639                 if (skrs == 0x00)
 1640                         sc->sk_ramsize = 0x20000;
 1641                 else
 1642                         sc->sk_ramsize = skrs * (1<<12);
 1643                 sc->sk_rboff = SK_RBOFF_0;
 1644         }
 1645 
 1646         /* Read and save physical media type */
 1647          sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
 1648 
 1649          if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
 1650                  sc->sk_coppertype = 1;
 1651          else
 1652                  sc->sk_coppertype = 0;
 1653 
 1654         /* Determine whether to name it with VPD PN or just make it up.
 1655          * Marvell Yukon VPD PN seems to freqently be bogus. */
 1656         switch (pci_get_device(dev)) {
 1657         case DEVICEID_SK_V1:
 1658         case DEVICEID_BELKIN_5005:
 1659         case DEVICEID_3COM_3C940:
 1660         case DEVICEID_LINKSYS_EG1032:
 1661         case DEVICEID_DLINK_DGE530T_A1:
 1662         case DEVICEID_DLINK_DGE530T_B1:
 1663                 /* Stay with VPD PN. */
 1664                 (void) pci_get_vpd_ident(dev, &pname);
 1665                 break;
 1666         case DEVICEID_SK_V2:
 1667                 /* YUKON VPD PN might bear no resemblance to reality. */
 1668                 switch (sc->sk_type) {
 1669                 case SK_GENESIS:
 1670                         /* Stay with VPD PN. */
 1671                         (void) pci_get_vpd_ident(dev, &pname);
 1672                         break;
 1673                 case SK_YUKON:
 1674                         pname = "Marvell Yukon Gigabit Ethernet";
 1675                         break;
 1676                 case SK_YUKON_LITE:
 1677                         pname = "Marvell Yukon Lite Gigabit Ethernet";
 1678                         break;
 1679                 case SK_YUKON_LP:
 1680                         pname = "Marvell Yukon LP Gigabit Ethernet";
 1681                         break;
 1682                 default:
 1683                         pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
 1684                         break;
 1685                 }
 1686 
 1687                 /* Yukon Lite Rev. A0 needs special test. */
 1688                 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
 1689                         u_int32_t far;
 1690                         u_int8_t testbyte;
 1691 
 1692                         /* Save flash address register before testing. */
 1693                         far = sk_win_read_4(sc, SK_EP_ADDR);
 1694 
 1695                         sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
 1696                         testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
 1697 
 1698                         if (testbyte != 0x00) {
 1699                                 /* Yukon Lite Rev. A0 detected. */
 1700                                 sc->sk_type = SK_YUKON_LITE;
 1701                                 sc->sk_rev = SK_YUKON_LITE_REV_A0;
 1702                                 /* Restore flash address register. */
 1703                                 sk_win_write_4(sc, SK_EP_ADDR, far);
 1704                         }
 1705                 }
 1706                 break;
 1707         default:
 1708                 device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
 1709                         "chipver=%02x, rev=%x\n",
 1710                         pci_get_vendor(dev), pci_get_device(dev),
 1711                         sc->sk_type, sc->sk_rev);
 1712                 error = ENXIO;
 1713                 goto fail;
 1714         }
 1715 
 1716         if (sc->sk_type == SK_YUKON_LITE) {
 1717                 switch (sc->sk_rev) {
 1718                 case SK_YUKON_LITE_REV_A0:
 1719                         revstr = "A0";
 1720                         break;
 1721                 case SK_YUKON_LITE_REV_A1:
 1722                         revstr = "A1";
 1723                         break;
 1724                 case SK_YUKON_LITE_REV_A3:
 1725                         revstr = "A3";
 1726                         break;
 1727                 default:
 1728                         revstr = "";
 1729                         break;
 1730                 }
 1731         } else {
 1732                 revstr = "";
 1733         }
 1734 
 1735         /* Announce the product name and more VPD data if there. */
 1736         if (pname != NULL)
 1737                 device_printf(dev, "%s rev. %s(0x%x)\n",
 1738                         pname, revstr, sc->sk_rev);
 1739 
 1740         if (bootverbose) {
 1741                 device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
 1742                 device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
 1743                 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
 1744                 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
 1745         }
 1746 
 1747         sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
 1748         if (sc->sk_devs[SK_PORT_A] == NULL) {
 1749                 device_printf(dev, "failed to add child for PORT_A\n");
 1750                 error = ENXIO;
 1751                 goto fail;
 1752         }
 1753         port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
 1754         if (port == NULL) {
 1755                 device_printf(dev, "failed to allocate memory for "
 1756                     "ivars of PORT_A\n");
 1757                 error = ENXIO;
 1758                 goto fail;
 1759         }
 1760         *port = SK_PORT_A;
 1761         device_set_ivars(sc->sk_devs[SK_PORT_A], port);
 1762 
 1763         if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
 1764                 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
 1765                 if (sc->sk_devs[SK_PORT_B] == NULL) {
 1766                         device_printf(dev, "failed to add child for PORT_B\n");
 1767                         error = ENXIO;
 1768                         goto fail;
 1769                 }
 1770                 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
 1771                 if (port == NULL) {
 1772                         device_printf(dev, "failed to allocate memory for "
 1773                             "ivars of PORT_B\n");
 1774                         error = ENXIO;
 1775                         goto fail;
 1776                 }
 1777                 *port = SK_PORT_B;
 1778                 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
 1779         }
 1780 
 1781         /* Turn on the 'driver is loaded' LED. */
 1782         CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
 1783 
 1784         error = bus_generic_attach(dev);
 1785         if (error) {
 1786                 device_printf(dev, "failed to attach port(s)\n");
 1787                 goto fail;
 1788         }
 1789 
 1790         /* Hook interrupt last to avoid having to lock softc */
 1791         error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
 1792             NULL, sk_intr, sc, &sc->sk_intrhand);
 1793 
 1794         if (error) {
 1795                 device_printf(dev, "couldn't set up irq\n");
 1796                 goto fail;
 1797         }
 1798 
 1799 fail:
 1800         if (error)
 1801                 skc_detach(dev);
 1802 
 1803         return(error);
 1804 }
 1805 
 1806 /*
 1807  * Shutdown hardware and free up resources. This can be called any
 1808  * time after the mutex has been initialized. It is called in both
 1809  * the error case in attach and the normal detach case so it needs
 1810  * to be careful about only freeing resources that have actually been
 1811  * allocated.
 1812  */
 1813 static int
 1814 sk_detach(dev)
 1815         device_t                dev;
 1816 {
 1817         struct sk_if_softc      *sc_if;
 1818         struct ifnet            *ifp;
 1819 
 1820         sc_if = device_get_softc(dev);
 1821         KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
 1822             ("sk mutex not initialized in sk_detach"));
 1823         SK_IF_LOCK(sc_if);
 1824 
 1825         ifp = sc_if->sk_ifp;
 1826         /* These should only be active if attach_xmac succeeded */
 1827         if (device_is_attached(dev)) {
 1828                 sk_stop(sc_if);
 1829                 /* Can't hold locks while calling detach */
 1830                 SK_IF_UNLOCK(sc_if);
 1831                 callout_drain(&sc_if->sk_tick_ch);
 1832                 callout_drain(&sc_if->sk_watchdog_ch);
 1833                 ether_ifdetach(ifp);
 1834                 SK_IF_LOCK(sc_if);
 1835         }
 1836         /*
 1837          * We're generally called from skc_detach() which is using
 1838          * device_delete_child() to get to here. It's already trashed
 1839          * miibus for us, so don't do it here or we'll panic.
 1840          */
 1841         /*
 1842         if (sc_if->sk_miibus != NULL)
 1843                 device_delete_child(dev, sc_if->sk_miibus);
 1844         */
 1845         bus_generic_detach(dev);
 1846         sk_dma_jumbo_free(sc_if);
 1847         sk_dma_free(sc_if);
 1848         SK_IF_UNLOCK(sc_if);
 1849         if (ifp)
 1850                 if_free(ifp);
 1851 
 1852         return(0);
 1853 }
 1854 
 1855 static int
 1856 skc_detach(dev)
 1857         device_t                dev;
 1858 {
 1859         struct sk_softc         *sc;
 1860 
 1861         sc = device_get_softc(dev);
 1862         KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
 1863 
 1864         if (device_is_alive(dev)) {
 1865                 if (sc->sk_devs[SK_PORT_A] != NULL) {
 1866                         free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
 1867                         device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
 1868                 }
 1869                 if (sc->sk_devs[SK_PORT_B] != NULL) {
 1870                         free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
 1871                         device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
 1872                 }
 1873                 bus_generic_detach(dev);
 1874         }
 1875 
 1876         if (sc->sk_intrhand)
 1877                 bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
 1878         bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);
 1879 
 1880         mtx_destroy(&sc->sk_mii_mtx);
 1881         mtx_destroy(&sc->sk_mtx);
 1882 
 1883         return(0);
 1884 }
 1885 
 1886 static bus_dma_tag_t
 1887 skc_get_dma_tag(device_t bus, device_t child __unused)
 1888 {
 1889 
 1890         return (bus_get_dma_tag(bus));
 1891 }
 1892 
/*
 * Callback context for sk_dmamap_cb(): receives the bus address of
 * the single DMA segment reported by bus_dmamap_load().
 */
struct sk_dmamap_arg {
	bus_addr_t	sk_busaddr;	/* bus address of segment 0 */
};
 1896 
 1897 static void
 1898 sk_dmamap_cb(arg, segs, nseg, error)
 1899         void                    *arg;
 1900         bus_dma_segment_t       *segs;
 1901         int                     nseg;
 1902         int                     error;
 1903 {
 1904         struct sk_dmamap_arg    *ctx;
 1905 
 1906         if (error != 0)
 1907                 return;
 1908 
 1909         ctx = arg;
 1910         ctx->sk_busaddr = segs[0].ds_addr;
 1911 }
 1912 
 1913 /*
 1914  * Allocate jumbo buffer storage. The SysKonnect adapters support
 1915  * "jumbograms" (9K frames), although SysKonnect doesn't currently
 1916  * use them in their drivers. In order for us to use them, we need
 1917  * large 9K receive buffers, however standard mbuf clusters are only
 1918  * 2048 bytes in size. Consequently, we need to allocate and manage
 1919  * our own jumbo buffer pool. Fortunately, this does not require an
 1920  * excessive amount of additional code.
 1921  */
 1922 static int
 1923 sk_dma_alloc(sc_if)
 1924         struct sk_if_softc      *sc_if;
 1925 {
 1926         struct sk_dmamap_arg    ctx;
 1927         struct sk_txdesc        *txd;
 1928         struct sk_rxdesc        *rxd;
 1929         int                     error, i;
 1930 
 1931         /* create parent tag */
 1932         /*
 1933          * XXX
 1934          * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
 1935          * in bus_dma_tag_create(9) as the NIC would support DAC mode.
 1936          * However bz@ reported that it does not work on amd64 with > 4GB
 1937          * RAM. Until we have more clues of the breakage, disable DAC mode
 1938          * by limiting DMA address to be in 32bit address space.
 1939          */
 1940         error = bus_dma_tag_create(
 1941                     bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
 1942                     1, 0,                       /* algnmnt, boundary */
 1943                     BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 1944                     BUS_SPACE_MAXADDR,          /* highaddr */
 1945                     NULL, NULL,                 /* filter, filterarg */
 1946                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 1947                     0,                          /* nsegments */
 1948                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1949                     0,                          /* flags */
 1950                     NULL, NULL,                 /* lockfunc, lockarg */
 1951                     &sc_if->sk_cdata.sk_parent_tag);
 1952         if (error != 0) {
 1953                 device_printf(sc_if->sk_if_dev,
 1954                     "failed to create parent DMA tag\n");
 1955                 goto fail;
 1956         }
 1957 
 1958         /* create tag for Tx ring */
 1959         error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
 1960                     SK_RING_ALIGN, 0,           /* algnmnt, boundary */
 1961                     BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 1962                     BUS_SPACE_MAXADDR,          /* highaddr */
 1963                     NULL, NULL,                 /* filter, filterarg */
 1964                     SK_TX_RING_SZ,              /* maxsize */
 1965                     1,                          /* nsegments */
 1966                     SK_TX_RING_SZ,              /* maxsegsize */
 1967                     0,                          /* flags */
 1968                     NULL, NULL,                 /* lockfunc, lockarg */
 1969                     &sc_if->sk_cdata.sk_tx_ring_tag);
 1970         if (error != 0) {
 1971                 device_printf(sc_if->sk_if_dev,
 1972                     "failed to allocate Tx ring DMA tag\n");
 1973                 goto fail;
 1974         }
 1975 
 1976         /* create tag for Rx ring */
 1977         error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
 1978                     SK_RING_ALIGN, 0,           /* algnmnt, boundary */
 1979                     BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 1980                     BUS_SPACE_MAXADDR,          /* highaddr */
 1981                     NULL, NULL,                 /* filter, filterarg */
 1982                     SK_RX_RING_SZ,              /* maxsize */
 1983                     1,                          /* nsegments */
 1984                     SK_RX_RING_SZ,              /* maxsegsize */
 1985                     0,                          /* flags */
 1986                     NULL, NULL,                 /* lockfunc, lockarg */
 1987                     &sc_if->sk_cdata.sk_rx_ring_tag);
 1988         if (error != 0) {
 1989                 device_printf(sc_if->sk_if_dev,
 1990                     "failed to allocate Rx ring DMA tag\n");
 1991                 goto fail;
 1992         }
 1993 
 1994         /* create tag for Tx buffers */
 1995         error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
 1996                     1, 0,                       /* algnmnt, boundary */
 1997                     BUS_SPACE_MAXADDR,          /* lowaddr */
 1998                     BUS_SPACE_MAXADDR,          /* highaddr */
 1999                     NULL, NULL,                 /* filter, filterarg */
 2000                     MCLBYTES * SK_MAXTXSEGS,    /* maxsize */
 2001                     SK_MAXTXSEGS,               /* nsegments */
 2002                     MCLBYTES,                   /* maxsegsize */
 2003                     0,                          /* flags */
 2004                     NULL, NULL,                 /* lockfunc, lockarg */
 2005                     &sc_if->sk_cdata.sk_tx_tag);
 2006         if (error != 0) {
 2007                 device_printf(sc_if->sk_if_dev,
 2008                     "failed to allocate Tx DMA tag\n");
 2009                 goto fail;
 2010         }
 2011 
 2012         /* create tag for Rx buffers */
 2013         error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
 2014                     1, 0,                       /* algnmnt, boundary */
 2015                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2016                     BUS_SPACE_MAXADDR,          /* highaddr */
 2017                     NULL, NULL,                 /* filter, filterarg */
 2018                     MCLBYTES,                   /* maxsize */
 2019                     1,                          /* nsegments */
 2020                     MCLBYTES,                   /* maxsegsize */
 2021                     0,                          /* flags */
 2022                     NULL, NULL,                 /* lockfunc, lockarg */
 2023                     &sc_if->sk_cdata.sk_rx_tag);
 2024         if (error != 0) {
 2025                 device_printf(sc_if->sk_if_dev,
 2026                     "failed to allocate Rx DMA tag\n");
 2027                 goto fail;
 2028         }
 2029 
 2030         /* allocate DMA'able memory and load the DMA map for Tx ring */
 2031         error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
 2032             (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT |
 2033             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_tx_ring_map);
 2034         if (error != 0) {
 2035                 device_printf(sc_if->sk_if_dev,
 2036                     "failed to allocate DMA'able memory for Tx ring\n");
 2037                 goto fail;
 2038         }
 2039 
 2040         ctx.sk_busaddr = 0;
 2041         error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
 2042             sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
 2043             SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2044         if (error != 0) {
 2045                 device_printf(sc_if->sk_if_dev,
 2046                     "failed to load DMA'able memory for Tx ring\n");
 2047                 goto fail;
 2048         }
 2049         sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
 2050 
 2051         /* allocate DMA'able memory and load the DMA map for Rx ring */
 2052         error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
 2053             (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT |
 2054             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_rx_ring_map);
 2055         if (error != 0) {
 2056                 device_printf(sc_if->sk_if_dev,
 2057                     "failed to allocate DMA'able memory for Rx ring\n");
 2058                 goto fail;
 2059         }
 2060 
 2061         ctx.sk_busaddr = 0;
 2062         error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
 2063             sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
 2064             SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2065         if (error != 0) {
 2066                 device_printf(sc_if->sk_if_dev,
 2067                     "failed to load DMA'able memory for Rx ring\n");
 2068                 goto fail;
 2069         }
 2070         sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
 2071 
 2072         /* create DMA maps for Tx buffers */
 2073         for (i = 0; i < SK_TX_RING_CNT; i++) {
 2074                 txd = &sc_if->sk_cdata.sk_txdesc[i];
 2075                 txd->tx_m = NULL;
 2076                 txd->tx_dmamap = NULL;
 2077                 error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
 2078                     &txd->tx_dmamap);
 2079                 if (error != 0) {
 2080                         device_printf(sc_if->sk_if_dev,
 2081                             "failed to create Tx dmamap\n");
 2082                         goto fail;
 2083                 }
 2084         }
 2085 
 2086         /* create DMA maps for Rx buffers */
 2087         if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
 2088             &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
 2089                 device_printf(sc_if->sk_if_dev,
 2090                     "failed to create spare Rx dmamap\n");
 2091                 goto fail;
 2092         }
 2093         for (i = 0; i < SK_RX_RING_CNT; i++) {
 2094                 rxd = &sc_if->sk_cdata.sk_rxdesc[i];
 2095                 rxd->rx_m = NULL;
 2096                 rxd->rx_dmamap = NULL;
 2097                 error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
 2098                     &rxd->rx_dmamap);
 2099                 if (error != 0) {
 2100                         device_printf(sc_if->sk_if_dev,
 2101                             "failed to create Rx dmamap\n");
 2102                         goto fail;
 2103                 }
 2104         }
 2105 
 2106 fail:
 2107         return (error);
 2108 }
 2109 
 2110 static int
 2111 sk_dma_jumbo_alloc(sc_if)
 2112         struct sk_if_softc      *sc_if;
 2113 {
 2114         struct sk_dmamap_arg    ctx;
 2115         struct sk_rxdesc        *jrxd;
 2116         int                     error, i;
 2117 
 2118         if (jumbo_disable != 0) {
 2119                 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
 2120                 sc_if->sk_jumbo_disable = 1;
 2121                 return (0);
 2122         }
 2123         /* create tag for jumbo Rx ring */
 2124         error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
 2125                     SK_RING_ALIGN, 0,           /* algnmnt, boundary */
 2126                     BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 2127                     BUS_SPACE_MAXADDR,          /* highaddr */
 2128                     NULL, NULL,                 /* filter, filterarg */
 2129                     SK_JUMBO_RX_RING_SZ,        /* maxsize */
 2130                     1,                          /* nsegments */
 2131                     SK_JUMBO_RX_RING_SZ,        /* maxsegsize */
 2132                     0,                          /* flags */
 2133                     NULL, NULL,                 /* lockfunc, lockarg */
 2134                     &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
 2135         if (error != 0) {
 2136                 device_printf(sc_if->sk_if_dev,
 2137                     "failed to allocate jumbo Rx ring DMA tag\n");
 2138                 goto jumbo_fail;
 2139         }
 2140 
 2141         /* create tag for jumbo Rx buffers */
 2142         error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
 2143                     1, 0,                       /* algnmnt, boundary */
 2144                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2145                     BUS_SPACE_MAXADDR,          /* highaddr */
 2146                     NULL, NULL,                 /* filter, filterarg */
 2147                     MJUM9BYTES,                 /* maxsize */
 2148                     1,                          /* nsegments */
 2149                     MJUM9BYTES,                 /* maxsegsize */
 2150                     0,                          /* flags */
 2151                     NULL, NULL,                 /* lockfunc, lockarg */
 2152                     &sc_if->sk_cdata.sk_jumbo_rx_tag);
 2153         if (error != 0) {
 2154                 device_printf(sc_if->sk_if_dev,
 2155                     "failed to allocate jumbo Rx DMA tag\n");
 2156                 goto jumbo_fail;
 2157         }
 2158 
 2159         /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
 2160         error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
 2161             (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, BUS_DMA_NOWAIT |
 2162             BUS_DMA_COHERENT | BUS_DMA_ZERO,
 2163             &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
 2164         if (error != 0) {
 2165                 device_printf(sc_if->sk_if_dev,
 2166                     "failed to allocate DMA'able memory for jumbo Rx ring\n");
 2167                 goto jumbo_fail;
 2168         }
 2169 
 2170         ctx.sk_busaddr = 0;
 2171         error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
 2172             sc_if->sk_cdata.sk_jumbo_rx_ring_map,
 2173             sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
 2174             &ctx, BUS_DMA_NOWAIT);
 2175         if (error != 0) {
 2176                 device_printf(sc_if->sk_if_dev,
 2177                     "failed to load DMA'able memory for jumbo Rx ring\n");
 2178                 goto jumbo_fail;
 2179         }
 2180         sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
 2181 
 2182         /* create DMA maps for jumbo Rx buffers */
 2183         if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
 2184             &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
 2185                 device_printf(sc_if->sk_if_dev,
 2186                     "failed to create spare jumbo Rx dmamap\n");
 2187                 goto jumbo_fail;
 2188         }
 2189         for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
 2190                 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
 2191                 jrxd->rx_m = NULL;
 2192                 jrxd->rx_dmamap = NULL;
 2193                 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
 2194                     &jrxd->rx_dmamap);
 2195                 if (error != 0) {
 2196                         device_printf(sc_if->sk_if_dev,
 2197                             "failed to create jumbo Rx dmamap\n");
 2198                         goto jumbo_fail;
 2199                 }
 2200         }
 2201 
 2202         return (0);
 2203 
 2204 jumbo_fail:
 2205         sk_dma_jumbo_free(sc_if);
 2206         device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
 2207             "resource shortage\n");
 2208         sc_if->sk_jumbo_disable = 1;
 2209         return (0);
 2210 }
 2211 
 2212 static void
 2213 sk_dma_free(sc_if)
 2214         struct sk_if_softc      *sc_if;
 2215 {
 2216         struct sk_txdesc        *txd;
 2217         struct sk_rxdesc        *rxd;
 2218         int                     i;
 2219 
 2220         /* Tx ring */
 2221         if (sc_if->sk_cdata.sk_tx_ring_tag) {
 2222                 if (sc_if->sk_rdata.sk_tx_ring_paddr)
 2223                         bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
 2224                             sc_if->sk_cdata.sk_tx_ring_map);
 2225                 if (sc_if->sk_rdata.sk_tx_ring)
 2226                         bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
 2227                             sc_if->sk_rdata.sk_tx_ring,
 2228                             sc_if->sk_cdata.sk_tx_ring_map);
 2229                 sc_if->sk_rdata.sk_tx_ring = NULL;
 2230                 sc_if->sk_rdata.sk_tx_ring_paddr = 0;
 2231                 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
 2232                 sc_if->sk_cdata.sk_tx_ring_tag = NULL;
 2233         }
 2234         /* Rx ring */
 2235         if (sc_if->sk_cdata.sk_rx_ring_tag) {
 2236                 if (sc_if->sk_rdata.sk_rx_ring_paddr)
 2237                         bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
 2238                             sc_if->sk_cdata.sk_rx_ring_map);
 2239                 if (sc_if->sk_rdata.sk_rx_ring)
 2240                         bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
 2241                             sc_if->sk_rdata.sk_rx_ring,
 2242                             sc_if->sk_cdata.sk_rx_ring_map);
 2243                 sc_if->sk_rdata.sk_rx_ring = NULL;
 2244                 sc_if->sk_rdata.sk_rx_ring_paddr = 0;
 2245                 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
 2246                 sc_if->sk_cdata.sk_rx_ring_tag = NULL;
 2247         }
 2248         /* Tx buffers */
 2249         if (sc_if->sk_cdata.sk_tx_tag) {
 2250                 for (i = 0; i < SK_TX_RING_CNT; i++) {
 2251                         txd = &sc_if->sk_cdata.sk_txdesc[i];
 2252                         if (txd->tx_dmamap) {
 2253                                 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
 2254                                     txd->tx_dmamap);
 2255                                 txd->tx_dmamap = NULL;
 2256                         }
 2257                 }
 2258                 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
 2259                 sc_if->sk_cdata.sk_tx_tag = NULL;
 2260         }
 2261         /* Rx buffers */
 2262         if (sc_if->sk_cdata.sk_rx_tag) {
 2263                 for (i = 0; i < SK_RX_RING_CNT; i++) {
 2264                         rxd = &sc_if->sk_cdata.sk_rxdesc[i];
 2265                         if (rxd->rx_dmamap) {
 2266                                 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
 2267                                     rxd->rx_dmamap);
 2268                                 rxd->rx_dmamap = NULL;
 2269                         }
 2270                 }
 2271                 if (sc_if->sk_cdata.sk_rx_sparemap) {
 2272                         bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
 2273                             sc_if->sk_cdata.sk_rx_sparemap);
 2274                         sc_if->sk_cdata.sk_rx_sparemap = NULL;
 2275                 }
 2276                 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
 2277                 sc_if->sk_cdata.sk_rx_tag = NULL;
 2278         }
 2279 
 2280         if (sc_if->sk_cdata.sk_parent_tag) {
 2281                 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
 2282                 sc_if->sk_cdata.sk_parent_tag = NULL;
 2283         }
 2284 }
 2285 
 2286 static void
 2287 sk_dma_jumbo_free(sc_if)
 2288         struct sk_if_softc      *sc_if;
 2289 {
 2290         struct sk_rxdesc        *jrxd;
 2291         int                     i;
 2292 
 2293         /* jumbo Rx ring */
 2294         if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
 2295                 if (sc_if->sk_rdata.sk_jumbo_rx_ring_paddr)
 2296                         bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
 2297                             sc_if->sk_cdata.sk_jumbo_rx_ring_map);
 2298                 if (sc_if->sk_rdata.sk_jumbo_rx_ring)
 2299                         bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
 2300                             sc_if->sk_rdata.sk_jumbo_rx_ring,
 2301                             sc_if->sk_cdata.sk_jumbo_rx_ring_map);
 2302                 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
 2303                 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = 0;
 2304                 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
 2305                 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
 2306         }
 2307 
 2308         /* jumbo Rx buffers */
 2309         if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
 2310                 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
 2311                         jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
 2312                         if (jrxd->rx_dmamap) {
 2313                                 bus_dmamap_destroy(
 2314                                     sc_if->sk_cdata.sk_jumbo_rx_tag,
 2315                                     jrxd->rx_dmamap);
 2316                                 jrxd->rx_dmamap = NULL;
 2317                         }
 2318                 }
 2319                 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
 2320                         bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
 2321                             sc_if->sk_cdata.sk_jumbo_rx_sparemap);
 2322                         sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
 2323                 }
 2324                 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
 2325                 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
 2326         }
 2327 }
 2328 
 2329 static void
 2330 sk_txcksum(ifp, m, f)
 2331         struct ifnet            *ifp;
 2332         struct mbuf             *m;
 2333         struct sk_tx_desc       *f;
 2334 {
 2335         struct ip               *ip;
 2336         u_int16_t               offset;
 2337         u_int8_t                *p;
 2338 
 2339         offset = sizeof(struct ip) + ETHER_HDR_LEN;
 2340         for(; m && m->m_len == 0; m = m->m_next)
 2341                 ;
 2342         if (m == NULL || m->m_len < ETHER_HDR_LEN) {
 2343                 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
 2344                 /* checksum may be corrupted */
 2345                 goto sendit;
 2346         }
 2347         if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
 2348                 if (m->m_len != ETHER_HDR_LEN) {
 2349                         if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
 2350                             __func__);
 2351                         /* checksum may be corrupted */
 2352                         goto sendit;
 2353                 }
 2354                 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
 2355                         ;
 2356                 if (m == NULL) {
 2357                         offset = sizeof(struct ip) + ETHER_HDR_LEN;
 2358                         /* checksum may be corrupted */
 2359                         goto sendit;
 2360                 }
 2361                 ip = mtod(m, struct ip *);
 2362         } else {
 2363                 p = mtod(m, u_int8_t *);
 2364                 p += ETHER_HDR_LEN;
 2365                 ip = (struct ip *)p;
 2366         }
 2367         offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
 2368 
 2369 sendit:
 2370         f->sk_csum_startval = 0;
 2371         f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
 2372             (offset << 16));
 2373 }
 2374 
/*
 * sk_encap: map an outgoing mbuf chain for DMA and place it on the Tx
 * descriptor ring.
 *
 * On success, returns 0 with one ring descriptor filled per DMA segment;
 * ownership (SK_TXCTL_OWN) of the first descriptor is handed to the NIC
 * last, so the chip never sees a half-built chain.  On ENOBUFS (no free
 * slot, or the ring cannot take nseg more entries) *m_head is left
 * intact so the caller can requeue it; a first-attempt mapping error
 * also leaves the chain untouched.  On defrag/retry failure or an empty
 * segment list the chain is freed and *m_head is set to NULL.
 */
static int
sk_encap(sc_if, m_head)
	struct sk_if_softc	*sc_if;
	struct mbuf		**m_head;
{
	struct sk_txdesc	*txd;
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[SK_MAXTXSEGS];
	u_int32_t		cflags, frag, si, sk_ctl;
	int			error, i, nseg;

	SK_IF_LOCK_ASSERT(sc_if);

	/* A free Tx bookkeeping slot is required before mapping. */
	if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}
	/* Keep at least one ring entry unused; unload and let caller retry. */
	if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	/* Select checksum-offload opcode when the packet requests it. */
	if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
		cflags = SK_OPCODE_CSUM;
	else
		cflags = SK_OPCODE_DEFAULT;
	/* si remembers the first descriptor; frag walks the ring. */
	si = frag = sc_if->sk_cdata.sk_tx_prod;
	for (i = 0; i < nseg; i++) {
		f = &sc_if->sk_rdata.sk_tx_ring[frag];
		f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
		f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
		sk_ctl = txsegs[i].ds_len | cflags;
		if (i == 0) {
			if (cflags == SK_OPCODE_CSUM)
				sk_txcksum(sc_if->sk_ifp, m, f);
			sk_ctl |= SK_TXCTL_FIRSTFRAG;
		} else
			/* Non-first descriptors may be handed over now. */
			sk_ctl |= SK_TXCTL_OWN;
		f->sk_ctl = htole32(sk_ctl);
		sc_if->sk_cdata.sk_tx_cnt++;
		SK_INC(frag, SK_TX_RING_CNT);
	}
	sc_if->sk_cdata.sk_tx_prod = frag;

	/* set EOF on the last descriptor */
	frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
	f = &sc_if->sk_rdata.sk_tx_ring[frag];
	f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);

	/* turn the first descriptor ownership to NIC */
	f = &sc_if->sk_rdata.sk_tx_ring[si];
	f->sk_ctl |= htole32(SK_TXCTL_OWN);

	/* Move the bookkeeping slot from the free list to the busy list. */
	STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* sync descriptors */
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
 2466 
 2467 static void
 2468 sk_start(ifp)
 2469         struct ifnet            *ifp;
 2470 {
 2471         struct sk_if_softc *sc_if;
 2472 
 2473         sc_if = ifp->if_softc;
 2474 
 2475         SK_IF_LOCK(sc_if);
 2476         sk_start_locked(ifp);
 2477         SK_IF_UNLOCK(sc_if);
 2478 
 2479         return;
 2480 }
 2481 
 2482 static void
 2483 sk_start_locked(ifp)
 2484         struct ifnet            *ifp;
 2485 {
 2486         struct sk_softc         *sc;
 2487         struct sk_if_softc      *sc_if;
 2488         struct mbuf             *m_head;
 2489         int                     enq;
 2490 
 2491         sc_if = ifp->if_softc;
 2492         sc = sc_if->sk_softc;
 2493 
 2494         SK_IF_LOCK_ASSERT(sc_if);
 2495 
 2496         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2497             sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
 2498                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2499                 if (m_head == NULL)
 2500                         break;
 2501 
 2502                 /*
 2503                  * Pack the data into the transmit ring. If we
 2504                  * don't have room, set the OACTIVE flag and wait
 2505                  * for the NIC to drain the ring.
 2506                  */
 2507                 if (sk_encap(sc_if, &m_head)) {
 2508                         if (m_head == NULL)
 2509                                 break;
 2510                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2511                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2512                         break;
 2513                 }
 2514 
 2515                 enq++;
 2516                 /*
 2517                  * If there's a BPF listener, bounce a copy of this frame
 2518                  * to him.
 2519                  */
 2520                 BPF_MTAP(ifp, m_head);
 2521         }
 2522 
 2523         if (enq > 0) {
 2524                 /* Transmit */
 2525                 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
 2526 
 2527                 /* Set a timeout in case the chip goes out to lunch. */
 2528                 sc_if->sk_watchdog_timer = 5;
 2529         }
 2530 }
 2531 
 2532 static void
 2533 sk_watchdog(arg)
 2534         void                    *arg;
 2535 {
 2536         struct sk_if_softc      *sc_if;
 2537         struct ifnet            *ifp;
 2538 
 2539         ifp = arg;
 2540         sc_if = ifp->if_softc;
 2541 
 2542         SK_IF_LOCK_ASSERT(sc_if);
 2543 
 2544         if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
 2545                 goto done;
 2546 
 2547         /*
 2548          * Reclaim first as there is a possibility of losing Tx completion
 2549          * interrupts.
 2550          */
 2551         sk_txeof(sc_if);
 2552         if (sc_if->sk_cdata.sk_tx_cnt != 0) {
 2553                 if_printf(sc_if->sk_ifp, "watchdog timeout\n");
 2554                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2555                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2556                 sk_init_locked(sc_if);
 2557         }
 2558 
 2559 done:
 2560         callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
 2561 
 2562         return;
 2563 }
 2564 
 2565 static int
 2566 skc_shutdown(dev)
 2567         device_t                dev;
 2568 {
 2569         struct sk_softc         *sc;
 2570 
 2571         sc = device_get_softc(dev);
 2572         SK_LOCK(sc);
 2573 
 2574         /* Turn off the 'driver is loaded' LED. */
 2575         CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
 2576 
 2577         /*
 2578          * Reset the GEnesis controller. Doing this should also
 2579          * assert the resets on the attached XMAC(s).
 2580          */
 2581         sk_reset(sc);
 2582         SK_UNLOCK(sc);
 2583 
 2584         return (0);
 2585 }
 2586 
 2587 static int
 2588 skc_suspend(dev)
 2589         device_t                dev;
 2590 {
 2591         struct sk_softc         *sc;
 2592         struct sk_if_softc      *sc_if0, *sc_if1;
 2593         struct ifnet            *ifp0 = NULL, *ifp1 = NULL;
 2594 
 2595         sc = device_get_softc(dev);
 2596 
 2597         SK_LOCK(sc);
 2598 
 2599         sc_if0 = sc->sk_if[SK_PORT_A];
 2600         sc_if1 = sc->sk_if[SK_PORT_B];
 2601         if (sc_if0 != NULL)
 2602                 ifp0 = sc_if0->sk_ifp;
 2603         if (sc_if1 != NULL)
 2604                 ifp1 = sc_if1->sk_ifp;
 2605         if (ifp0 != NULL)
 2606                 sk_stop(sc_if0);
 2607         if (ifp1 != NULL)
 2608                 sk_stop(sc_if1);
 2609         sc->sk_suspended = 1;
 2610 
 2611         SK_UNLOCK(sc);
 2612 
 2613         return (0);
 2614 }
 2615 
 2616 static int
 2617 skc_resume(dev)
 2618         device_t                dev;
 2619 {
 2620         struct sk_softc         *sc;
 2621         struct sk_if_softc      *sc_if0, *sc_if1;
 2622         struct ifnet            *ifp0 = NULL, *ifp1 = NULL;
 2623 
 2624         sc = device_get_softc(dev);
 2625 
 2626         SK_LOCK(sc);
 2627 
 2628         sc_if0 = sc->sk_if[SK_PORT_A];
 2629         sc_if1 = sc->sk_if[SK_PORT_B];
 2630         if (sc_if0 != NULL)
 2631                 ifp0 = sc_if0->sk_ifp;
 2632         if (sc_if1 != NULL)
 2633                 ifp1 = sc_if1->sk_ifp;
 2634         if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
 2635                 sk_init_locked(sc_if0);
 2636         if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
 2637                 sk_init_locked(sc_if1);
 2638         sc->sk_suspended = 0;
 2639 
 2640         SK_UNLOCK(sc);
 2641 
 2642         return (0);
 2643 }
 2644 
 2645 /*
 2646  * According to the data sheet from SK-NET GENESIS the hardware can compute
 2647  * two Rx checksums at the same time(Each checksum start position is
 2648  * programmed in Rx descriptors). However it seems that TCP/UDP checksum
 * does not work, at least on my Yukon hardware. I tried every possible way
 * to get a correct checksum value but couldn't obtain one. So TCP/UDP
 2651  * checksum offload was disabled at the moment and only IP checksum offload
 2652  * was enabled.
 * As the normal IP header size is 20 bytes, I can't expect it to give an
 2654  * increase in throughput. However it seems it doesn't hurt performance in
 * my testing. If there is more detailed information about the checksum
 * secrets of this hardware, please contact yongari@FreeBSD.org to add
 2657  * TCP/UDP checksum offload support.
 2658  */
/*
 * sk_rxcksum: use the two 16-bit hardware checksum values delivered in
 * the Rx descriptor (packed into 'csum') to validate the IP header
 * checksum of a received frame.  Only plain IPv4 frames without IP
 * options are handled (see the block comment above).  On success
 * CSUM_IP_CHECKED, and when the sum verifies CSUM_IP_VALID, are set in
 * the mbuf packet header.
 */
static __inline void
sk_rxcksum(ifp, m, csum)
	struct ifnet		*ifp;
	struct mbuf		*m;
	u_int32_t		csum;
{
	struct ether_header	*eh;
	struct ip		*ip;
	int32_t			hlen, len, pktlen;
	u_int16_t		csum1, csum2, ipcsum;

	pktlen = m->m_pkthdr.len;
	/* Must hold at least an Ethernet header plus a minimal IP header. */
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	/* IPv4 only. */
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	/*
	 * NOTE(review): assumes the Ethernet and IP headers are contiguous
	 * in the first mbuf; true for the single-mbuf packets built by
	 * sk_rxeof()/sk_jumbo_rxeof().
	 */
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;
	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	/* Reject truncated frames and inconsistent length fields. */
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;

	/* Low word: first checksum region; high word: second region. */
	csum1 = htons(csum & 0xffff);
	csum2 = htons((csum >> 16) & 0xffff);
	ipcsum = in_addword(csum1, ~csum2 & 0xffff);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		/*
		 * If the second checksum value were correct we could compute
		 * the IP checksum with simple math, and it would also yield
		 * the TCP/UDP checksum.  Unfortunately the value reported by
		 * the hardware is wrong (there seems to be some undocumented
		 * magic needed to obtain the correct value), so frames with
		 * IP options are left unverified.  A pseudo-header checksum
		 * calculation would still be needed anyway due to hardware
		 * limitations.
		 */
		return;
	}
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
	/* A one's-complement sum of 0xffff means the header is good. */
	if (ipcsum == 0xffff)
		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
}
 2710 
 2711 static __inline int
 2712 sk_rxvalid(sc, stat, len)
 2713         struct sk_softc         *sc;
 2714         u_int32_t               stat, len;
 2715 {
 2716 
 2717         if (sc->sk_type == SK_GENESIS) {
 2718                 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
 2719                     XM_RXSTAT_BYTES(stat) != len)
 2720                         return (0);
 2721         } else {
 2722                 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
 2723                     YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
 2724                     YU_RXSTAT_JABBER)) != 0 ||
 2725                     (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
 2726                     YU_RXSTAT_BYTES(stat) != len)
 2727                         return (0);
 2728         }
 2729 
 2730         return (1);
 2731 }
 2732 
/*
 * sk_rxeof: drain completed descriptors from the standard (non-jumbo)
 * Rx ring and pass good frames up the stack.  Called with the
 * per-interface lock held; the lock is dropped around if_input().
 */
static void
sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_rx_desc	*cur_rx;
	struct sk_rxdesc	*rxd;
	int			cons, prog;
	u_int32_t		csum, rxstat, sk_ctl;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	SK_IF_LOCK_ASSERT(sc_if);

	/* Pick up descriptor updates written by the chip. */
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
	    prog++, SK_INC(cons, SK_RX_RING_CNT)) {
		cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
		sk_ctl = le32toh(cur_rx->sk_ctl);
		/* Stop at the first descriptor still owned by the NIC. */
		if ((sk_ctl & SK_RXCTL_OWN) != 0)
			break;
		rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
		rxstat = le32toh(cur_rx->sk_xmac_rxstat);

		/*
		 * Frame must be complete in a single descriptor, within
		 * the legal length range, and pass the MAC status check.
		 */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
		    SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			sk_discard_rxbuf(sc_if, cons);
			continue;
		}

		m = rxd->rx_m;
		csum = le32toh(cur_rx->sk_csum);
		/* Attach a fresh buffer before consuming this one. */
		if (sk_newbuf(sc_if, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/* reuse old buffer */
			sk_discard_rxbuf(sc_if, cons);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			sk_rxcksum(ifp, m, csum);
		/* Drop the lock across the stack input call. */
		SK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		SK_IF_LOCK(sc_if);
	}

	if (prog > 0) {
		/* Publish the refreshed descriptors back to the chip. */
		sc_if->sk_cdata.sk_rx_cons = cons;
		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
		    sc_if->sk_cdata.sk_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
 2799 
/*
 * sk_jumbo_rxeof: jumbo-frame variant of sk_rxeof(); drains completed
 * descriptors from the jumbo Rx ring and passes good frames up the
 * stack.  Called with the per-interface lock held; the lock is dropped
 * around if_input().
 */
static void
sk_jumbo_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_rx_desc	*cur_rx;
	struct sk_rxdesc	*jrxd;
	int			cons, prog;
	u_int32_t		csum, rxstat, sk_ctl;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	SK_IF_LOCK_ASSERT(sc_if);

	/* Pick up descriptor updates written by the chip. */
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
	    prog < SK_JUMBO_RX_RING_CNT;
	    prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
		cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
		sk_ctl = le32toh(cur_rx->sk_ctl);
		/* Stop at the first descriptor still owned by the NIC. */
		if ((sk_ctl & SK_RXCTL_OWN) != 0)
			break;
		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
		rxstat = le32toh(cur_rx->sk_xmac_rxstat);

		/*
		 * Frame must be complete in a single descriptor, within
		 * the jumbo length range, and pass the MAC status check.
		 */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
		    SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			sk_discard_jumbo_rxbuf(sc_if, cons);
			continue;
		}

		m = jrxd->rx_m;
		csum = le32toh(cur_rx->sk_csum);
		/* Attach a fresh jumbo buffer before consuming this one. */
		if (sk_jumbo_newbuf(sc_if, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			/* reuse old buffer */
			sk_discard_jumbo_rxbuf(sc_if, cons);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			sk_rxcksum(ifp, m, csum);
		/* Drop the lock across the stack input call. */
		SK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		SK_IF_LOCK(sc_if);
	}

	if (prog > 0) {
		/* Publish the refreshed descriptors back to the chip. */
		sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
		    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
 2867 
/*
 * sk_txeof: reclaim Tx descriptors the NIC has finished with, unmap
 * and free the transmitted mbufs, and return their bookkeeping slots
 * to the free list.  Disarms the watchdog once the ring is empty.
 */
static void
sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_txdesc	*txd;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx, sk_ctl;

	ifp = sc_if->sk_ifp;

	/* Nothing in flight. */
	txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
		if (sc_if->sk_cdata.sk_tx_cnt <= 0)
			break;
		cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
		sk_ctl = le32toh(cur_tx->sk_ctl);
		/* Stop at the first descriptor the NIC still owns. */
		if (sk_ctl & SK_TXCTL_OWN)
			break;
		sc_if->sk_cdata.sk_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* Only the last fragment of a frame carries the mbuf. */
		if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
			continue;
		bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		/* Recycle the bookkeeping slot; advance to the next frame. */
		STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
	}
	sc_if->sk_cdata.sk_tx_cons = idx;
	/* Keep the watchdog armed only while work remains. */
	sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
 2917 
/*
 * sk_tick: link-recovery poll for XMAC-based (GEnesis) interfaces.
 * Scheduled after a GP0 (link) interrupt; polls the link_sync GPIO pin
 * and, once the same link state is read three times in a row,
 * re-enables the GP0 interrupt and stops polling.  Broadcom-PHY boards
 * are dispatched to their own handler instead.
 */
static void
sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	ifp = sc_if->sk_ifp;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Nothing to do while the interface is down. */
	if (!(ifp->if_flags & IFF_UP))
		return;

	/* Broadcom PHYs use the interrupt-driven handler. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link state not stable for 3 reads: poll again in one second. */
	if (i != 3) {
		callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	/* Polling is no longer needed. */
	callout_stop(&sc_if->sk_tick_ch);
}
 2962 
 2963 static void
 2964 sk_yukon_tick(xsc_if)
 2965         void                    *xsc_if;
 2966 {
 2967         struct sk_if_softc      *sc_if;
 2968         struct mii_data         *mii;
 2969 
 2970         sc_if = xsc_if;
 2971         mii = device_get_softc(sc_if->sk_miibus);
 2972 
 2973         mii_tick(mii);
 2974         callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
 2975 }
 2976 
/*
 * sk_intr_bcom: service a link-state interrupt from the Broadcom PHY
 * found on some GEnesis boards.  The MAC's Rx/Tx paths are disabled
 * while the PHY is examined and re-enabled on exit.
 */
static void
sk_intr_bcom(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			status;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = sc_if->sk_ifp;

	/* Pause the MAC while we talk to the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* If the interface is down, just reinitialize the XMAC. */
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int			lstat;
		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate and mark it down. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link change with link up: mask PHY interrupts. */
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
			    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* Still negotiating: keep polling via sk_tick. */
			mii_tick(mii);
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}
	}

	/* Resume the MAC. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
 3030 
/*
 * Per-port interrupt handler for the XMAC (GENESIS) MAC.  Acks the
 * XMAC interrupt source register, arms the MII tick timer when the
 * link state may have changed, and flushes the Tx/Rx FIFOs on
 * underrun/overrun conditions.
 */
static void
sk_intr_xmac(sc_if)
        struct sk_if_softc      *sc_if;
{
        u_int16_t               status;

        /* Read the interrupt sources (presumably read-to-clear --
         * see XMAC II data sheet). */
        status = SK_XM_READ_2(sc_if, XM_ISR);

        /*
         * Link has gone down. Start MII tick timeout to
         * watch for link resync.
         */
        if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
                if (status & XM_ISR_GP0_SET) {
                        /* Mask further GP0 interrupts until resync. */
                        SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
                        callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
                }

                if (status & XM_ISR_AUTONEG_DONE) {
                        callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
                }
        }

        /*
         * NOTE(review): XM_IMR_* names are used below to test ISR status
         * bits; presumably the ISR and IMR registers share the same bit
         * layout -- confirm against the XMAC II documentation.
         */
        if (status & XM_IMR_TX_UNDERRUN)
                SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

        if (status & XM_IMR_RX_OVERRUN)
                SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

        /* Re-read to ack anything that latched while we were servicing. */
        status = SK_XM_READ_2(sc_if, XM_ISR);

        return;
}
 3064 
 3065 static void
 3066 sk_intr_yukon(sc_if)
 3067         struct sk_if_softc      *sc_if;
 3068 {
 3069         u_int8_t status;
 3070 
 3071         status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
 3072         /* RX overrun */
 3073         if ((status & SK_GMAC_INT_RX_OVER) != 0) {
 3074                 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
 3075                     SK_RFCTL_RX_FIFO_OVER);
 3076         }
 3077         /* TX underrun */
 3078         if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
 3079                 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
 3080                     SK_TFCTL_TX_FIFO_UNDER);
 3081         }
 3082 }
 3083 
/*
 * Main interrupt handler, shared by both ports of dual-port adapters.
 * Loops while any unmasked interrupt source is pending, servicing Rx
 * EOF, Tx EOF, per-port MAC and external-register (BCOM PHY) sources,
 * then re-enables interrupts and restarts transmission on any
 * interface with queued packets.
 */
static void
sk_intr(xsc)
        void                    *xsc;
{
        struct sk_softc         *sc = xsc;
        struct sk_if_softc      *sc_if0, *sc_if1;
        struct ifnet            *ifp0 = NULL, *ifp1 = NULL;
        u_int32_t               status;

        SK_LOCK(sc);

        /* 0 means not our interrupt; all-ones means the device is
         * unreadable (e.g. detached/suspended hardware). */
        status = CSR_READ_4(sc, SK_ISSR);
        if (status == 0 || status == 0xffffffff || sc->sk_suspended)
                goto done_locked;

        sc_if0 = sc->sk_if[SK_PORT_A];
        sc_if1 = sc->sk_if[SK_PORT_B];

        if (sc_if0 != NULL)
                ifp0 = sc_if0->sk_ifp;
        if (sc_if1 != NULL)
                ifp1 = sc_if1->sk_ifp;

        /*
         * Service until no unmasked source remains.  NOTE(review): the
         * ifp0/ifp1 dereferences below rely on sk_intrmask only having
         * a port's sources enabled while that port is attached and up
         * -- confirm this invariant holds on all paths.
         */
        for (; (status &= sc->sk_intrmask) != 0;) {
                /* Handle receive interrupts first. */
                if (status & SK_ISR_RX1_EOF) {
                        if (ifp0->if_mtu > SK_MAX_FRAMELEN)
                                sk_jumbo_rxeof(sc_if0);
                        else
                                sk_rxeof(sc_if0);
                        /* Ack the EOF interrupt and restart the Rx BMU. */
                        CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
                            SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
                }
                if (status & SK_ISR_RX2_EOF) {
                        if (ifp1->if_mtu > SK_MAX_FRAMELEN)
                                sk_jumbo_rxeof(sc_if1);
                        else
                                sk_rxeof(sc_if1);
                        CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
                            SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
                }

                /* Then transmit interrupts. */
                if (status & SK_ISR_TX1_S_EOF) {
                        sk_txeof(sc_if0);
                        CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
                }
                if (status & SK_ISR_TX2_S_EOF) {
                        sk_txeof(sc_if1);
                        CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
                }

                /* Then MAC interrupts. */
                if (status & SK_ISR_MAC1 &&
                    ifp0->if_drv_flags & IFF_DRV_RUNNING) {
                        if (sc->sk_type == SK_GENESIS)
                                sk_intr_xmac(sc_if0);
                        else
                                sk_intr_yukon(sc_if0);
                }

                if (status & SK_ISR_MAC2 &&
                    ifp1->if_drv_flags & IFF_DRV_RUNNING) {
                        if (sc->sk_type == SK_GENESIS)
                                sk_intr_xmac(sc_if1);
                        else
                                sk_intr_yukon(sc_if1);
                }

                /* External-register interrupt: used by BCOM PHYs. */
                if (status & SK_ISR_EXTERNAL_REG) {
                        if (ifp0 != NULL &&
                            sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
                                sk_intr_bcom(sc_if0);
                        if (ifp1 != NULL &&
                            sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
                                sk_intr_bcom(sc_if1);
                }
                /* Re-sample: more work may have arrived meanwhile. */
                status = CSR_READ_4(sc, SK_ISSR);
        }

        /* Re-enable interrupts. */
        CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

        /* Kick the transmitter on any interface with pending packets. */
        if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
                sk_start_locked(ifp0);
        if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
                sk_start_locked(ifp1);

done_locked:
        SK_UNLOCK(sc);
}
 3174 
/*
 * Bring the XMAC (GENESIS) MAC out of reset and program it: station
 * address, broadcast/FCS/padding behavior, jumbo-frame handling, Rx
 * filter, interrupt masks and the MAC arbiter.  For boards with an
 * external Broadcom PHY, also releases the PHY from reset and applies
 * the BCM5400 errata writes.  Called with the per-interface lock held.
 */
static void
sk_init_xmac(sc_if)
        struct sk_if_softc      *sc_if;
{
        struct sk_softc         *sc;
        struct ifnet            *ifp;
        u_int16_t               eaddr[(ETHER_ADDR_LEN+1)/2];
        /* BCM5400 errata: magic register/value pairs, zero-terminated. */
        static const struct sk_bcom_hack bhack[] = {
        { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
        { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
        { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
        { 0, 0 } };

        SK_IF_LOCK_ASSERT(sc_if);

        sc = sc_if->sk_softc;
        ifp = sc_if->sk_ifp;

        /* Unreset the XMAC. */
        SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
        DELAY(1000);

        /* Reset the XMAC's internal state. */
        SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

        /* Save the XMAC II revision */
        sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

        /*
         * Perform additional initialization for external PHYs,
         * namely for the 1000baseTX cards that use the XMAC's
         * GMII mode.
         */
        if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
                int                     i = 0;
                u_int32_t               val;

                /* Take PHY out of reset. */
                val = sk_win_read_4(sc, SK_GPIO);
                /* Each port has its own GPIO reset line. */
                if (sc_if->sk_port == SK_PORT_A)
                        val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
                else
                        val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
                sk_win_write_4(sc, SK_GPIO, val);

                /* Enable GMII mode on the XMAC. */
                SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

                /* Reset the PHY, then unmask its interrupts. */
                sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
                    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
                DELAY(10000);
                sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
                    BRGPHY_MII_IMR, 0xFFF0);

                /*
                 * Early versions of the BCM5400 apparently have
                 * a bug that requires them to have their reserved
                 * registers initialized to some magic values. I don't
                 * know what the numbers do, I'm just the messenger.
                 */
                if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
                    == 0x6041) {
                        while(bhack[i].reg) {
                                sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
                                    bhack[i].reg, bhack[i].val);
                                i++;
                        }
                }
        }

        /* Set station address */
        bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
        SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
        SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
        SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
        SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

        /* Honor the interface's broadcast-reception flag. */
        if (ifp->if_flags & IFF_BROADCAST) {
                SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
        } else {
                SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
        }

        /* We don't need the FCS appended to the packet. */
        SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

        /* We want short frames padded to 60 bytes. */
        SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

        /*
         * Enable the reception of all error frames. This is
         * a necessary evil due to the design of the XMAC. The
         * XMAC's receive FIFO is only 8K in size, however jumbo
         * frames can be up to 9000 bytes in length. When bad
         * frame filtering is enabled, the XMAC's RX FIFO operates
         * in 'store and forward' mode. For this to work, the
         * entire frame has to fit into the FIFO, but that means
         * that jumbo frames larger than 8192 bytes will be
         * truncated. Disabling all bad frame filtering causes
         * the RX FIFO to operate in streaming mode, in which
         * case the XMAC will start transferring frames out of the
         * RX FIFO as soon as the FIFO threshold is reached.
         */
        if (ifp->if_mtu > SK_MAX_FRAMELEN) {
                SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
                    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
                    XM_MODE_RX_INRANGELEN);
                SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
        } else
                SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

        /*
         * Bump up the transmit threshold. This helps hold off transmit
         * underruns when we're blasting traffic from both ports at once.
         */
        SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

        /* Set Rx filter */
        sk_rxfilter_genesis(sc_if);

        /* Clear and enable interrupts */
        SK_XM_READ_2(sc_if, XM_ISR);
        /* With an internal PHY, enable only the wanted sources;
         * otherwise mask everything (external PHY interrupts are
         * delivered via the external-register interrupt instead). */
        if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
                SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
        else
                SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

        /* Configure MAC arbiter */
        switch(sc_if->sk_xmac_rev) {
        case XM_XMAC_REV_B2:
                sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
                sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
                sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
                sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
                sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
                sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
                sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
                sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
                sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
                break;
        case XM_XMAC_REV_C1:
                sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
                sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
                sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
                sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
                sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
                sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
                sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
                sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
                sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
                break;
        default:
                break;
        }
        sk_win_write_2(sc, SK_MACARB_CTL,
            SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

        sc_if->sk_link = 1;

        return;
}
 3336 
/*
 * Bring the Yukon (GMAC) MAC and GPHY out of reset and program them:
 * PHY mode (copper/fiber, autonegotiation), MIB counter clear, receive
 * control, transmit/serial-mode parameters, station and flow-control
 * addresses, Rx filter and the Rx/Tx MAC FIFOs.  Includes the COMA-mode
 * workaround for Yukon-Lite rev A3 and later.  Called with the
 * per-interface lock held.
 */
static void
sk_init_yukon(sc_if)
        struct sk_if_softc      *sc_if;
{
        u_int32_t               phy, v;
        u_int16_t               reg;
        struct sk_softc         *sc;
        struct ifnet            *ifp;
        u_int8_t                *eaddr;
        int                     i;

        SK_IF_LOCK_ASSERT(sc_if);

        sc = sc_if->sk_softc;
        ifp = sc_if->sk_ifp;

        if (sc->sk_type == SK_YUKON_LITE &&
            sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
                /*
                 * Workaround code for COMA mode, set PHY reset.
                 * Otherwise it will not correctly take chip out of
                 * powerdown (coma)
                 */
                v = sk_win_read_4(sc, SK_GPIO);
                v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
                sk_win_write_4(sc, SK_GPIO, v);
        }

        /* GMAC and GPHY Reset */
        SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
        SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
        DELAY(1000);

        if (sc->sk_type == SK_YUKON_LITE &&
            sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
                /*
                 * Workaround code for COMA mode, clear PHY reset
                 */
                v = sk_win_read_4(sc, SK_GPIO);
                v |= SK_GPIO_DIR9;
                v &= ~SK_GPIO_DAT9;
                sk_win_write_4(sc, SK_GPIO, v);
        }

        /* PHY configuration: autonegotiate everything, pause enabled. */
        phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
                SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

        if (sc->sk_coppertype)
                phy |= SK_GPHY_COPPER;
        else
                phy |= SK_GPHY_FIBER;

        /* Pulse the PHY reset with the chosen configuration. */
        SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
        DELAY(1000);
        SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
        SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
                      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

        /* unused read of the interrupt source register */
        SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

        reg = SK_YU_READ_2(sc_if, YUKON_PAR);

        /* MIB Counter Clear Mode set */
        reg |= YU_PAR_MIB_CLR;
        SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

        /* MIB Counter Clear Mode clear */
        reg &= ~YU_PAR_MIB_CLR;
        SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

        /* receive control reg */
        SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

        /* transmit parameter register */
        SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
                      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

        /* serial mode register */
        reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
        if (ifp->if_mtu > SK_MAX_FRAMELEN)
                reg |= YU_SMR_MFL_JUMBO;
        SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

        /* Setup Yukon's station address (little-endian 16-bit words). */
        eaddr = IF_LLADDR(sc_if->sk_ifp);
        for (i = 0; i < 3; i++)
                SK_YU_WRITE_2(sc_if, SK_MAC0_0 + i * 4,
                    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
        /* Set GMAC source address of flow control. */
        for (i = 0; i < 3; i++)
                SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
                    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
        /* Set GMAC virtual address. */
        for (i = 0; i < 3; i++)
                SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4,
                    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);

        /* Set Rx filter */
        sk_rxfilter_yukon(sc_if);

        /* Write 0 to the Tx/Rx/Tx-arbiter counter-overflow interrupt
         * mask registers.  NOTE(review): original comment said "enable";
         * presumably 0 masks all overflow sources -- confirm polarity
         * against the GMAC register documentation. */
        SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
        SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
        SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

        /* Configure RX MAC FIFO Flush Mask */
        v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
            YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
            YU_RXSTAT_JABBER;
        SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

        /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
        if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
                v = SK_TFCTL_OPERATION_ON;
        else
                v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
        /* Configure RX MAC FIFO */
        SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
        SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

        /* Increase flush threshold to 64 bytes */
        SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
            SK_RFCTL_FIFO_THRESHOLD + 1);

        /* Configure TX MAC FIFO */
        SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
        SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}
 3466 
 3467 /*
 3468  * Note that to properly initialize any part of the GEnesis chip,
 3469  * you first have to take it out of reset mode.
 3470  */
 3471 static void
 3472 sk_init(xsc)
 3473         void                    *xsc;
 3474 {
 3475         struct sk_if_softc      *sc_if = xsc;
 3476 
 3477         SK_IF_LOCK(sc_if);
 3478         sk_init_locked(sc_if);
 3479         SK_IF_UNLOCK(sc_if);
 3480 
 3481         return;
 3482 }
 3483 
/*
 * Bring the interface up: stop any in-progress I/O, program the MAC
 * (XMAC or Yukon), FIFOs, RAM buffers and BMUs, initialize the Rx/Tx
 * descriptor rings, set interrupt moderation and masks, and start the
 * transmit/receive state machines and the watchdog/tick timers.
 * Called with the per-interface lock held.
 */
static void
sk_init_locked(sc_if)
        struct sk_if_softc      *sc_if;
{
        struct sk_softc         *sc;
        struct ifnet            *ifp;
        struct mii_data         *mii;
        u_int16_t               reg;
        u_int32_t               imr;
        int                     error;

        SK_IF_LOCK_ASSERT(sc_if);

        ifp = sc_if->sk_ifp;
        sc = sc_if->sk_softc;
        mii = device_get_softc(sc_if->sk_miibus);

        /* Already running -- nothing to do. */
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                return;

        /* Cancel pending I/O and free all RX/TX buffers. */
        sk_stop(sc_if);

        if (sc->sk_type == SK_GENESIS) {
                /* Configure LINK_SYNC LED */
                SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
                SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
                        SK_LINKLED_LINKSYNC_ON);

                /* Configure RX LED */
                SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
                        SK_RXLEDCTL_COUNTER_START);

                /* Configure TX LED */
                SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
                        SK_TXLEDCTL_COUNTER_START);
        }

        /*
         * Configure descriptor poll timer
         *
         * The SK-NET GENESIS data sheet warns about the possibility of
         * losing a Start transmit command due to CPU/cache related
         * interim storage problems under certain conditions. The
         * document recommends a polling mechanism to send a Start
         * transmit command to initiate transfer of ready descriptors
         * regularly. To cope with this issue sk(4) now enables
         * descriptor poll timer to initiate descriptor processing
         * periodically as defined by SK_DPT_TIMER_MAX. However sk(4)
         * still issues SK_TXBMU_TX_START to the Tx BMU to get fast
         * execution of Tx command instead of waiting for next descriptor
         * polling time. The same rule may apply to Rx side too but it
         * seems that is not needed at the moment.
         * Since sk(4) uses descriptor polling as a last resort there is
         * no need to set smaller polling time than maximum allowable one.
         */
        SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);

        /* Configure I2C registers */

        /* Configure XMAC(s) */
        switch (sc->sk_type) {
        case SK_GENESIS:
                sk_init_xmac(sc_if);
                break;
        case SK_YUKON:
        case SK_YUKON_LITE:
        case SK_YUKON_LP:
                sk_init_yukon(sc_if);
                break;
        }
        mii_mediachg(mii);

        if (sc->sk_type == SK_GENESIS) {
                /* Configure MAC FIFOs */
                SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
                SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
                SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

                SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
                SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
                SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
        }

        /* Configure transmit arbiter(s) */
        SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
            SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

        /* Configure RAMbuffers: unreset, then set start/end/read/write
         * pointers to the port's assigned RAM region. */
        SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
        SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
        SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
        SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
        SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
        SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

        SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
        SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
        SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
        SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
        SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
        SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
        SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

        /* Configure BMUs: point the Rx BMU at the standard or jumbo
         * ring depending on the current MTU. */
        SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
        if (ifp->if_mtu > SK_MAX_FRAMELEN) {
                SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
                    SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
                SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
                    SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
        } else {
                SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
                    SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
                SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
                    SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
        }

        SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
        SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
            SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
        SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
            SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));

        /* Init descriptors */
        if (ifp->if_mtu > SK_MAX_FRAMELEN)
                error = sk_init_jumbo_rx_ring(sc_if);
        else
                error = sk_init_rx_ring(sc_if);
        if (error != 0) {
                /* Ring init failed (mbuf shortage): tear down again. */
                device_printf(sc_if->sk_if_dev,
                    "initialization failed: no memory for rx buffers\n");
                sk_stop(sc_if);
                return;
        }
        sk_init_tx_ring(sc_if);

        /* Set interrupt moderation if changed via sysctl. */
        imr = sk_win_read_4(sc, SK_IMTIMERINIT);
        if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
                sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
                    sc->sk_int_ticks));
                if (bootverbose)
                        device_printf(sc_if->sk_if_dev,
                            "interrupt moderation is %d us.\n",
                            sc->sk_int_mod);
        }

        /* Configure interrupt handling: enable this port's sources. */
        CSR_READ_4(sc, SK_ISSR);
        if (sc_if->sk_port == SK_PORT_A)
                sc->sk_intrmask |= SK_INTRS1;
        else
                sc->sk_intrmask |= SK_INTRS2;

        sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

        CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

        /* Start BMUs. */
        SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

        switch(sc->sk_type) {
        case SK_GENESIS:
                /* Enable XMACs TX and RX state machines */
                SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
                SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
                break;
        case SK_YUKON:
        case SK_YUKON_LITE:
        case SK_YUKON_LP:
                reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
                reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
#if 0
                /* XXX disable 100Mbps and full duplex mode? */
                reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
#endif
                SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
        }

        /* Activate descriptor polling timer */
        SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
        /* start transfer of Tx descriptors */
        CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        /* Yukon needs a periodic tick to drain overflowing MIB counters. */
        switch (sc->sk_type) {
        case SK_YUKON:
        case SK_YUKON_LITE:
        case SK_YUKON_LP:
                callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
                break;
        }

        callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);

        return;
}
 3683 
/*
 * Bring the interface down: stop the timers and the Tx/Rx BMUs (with
 * bounded polling for completion), reset the PHY and MAC, disable this
 * port's interrupt sources, and release every mbuf still held by the
 * Rx, jumbo-Rx and Tx descriptor rings.  Called with the per-interface
 * lock held.
 */
static void
sk_stop(sc_if)
        struct sk_if_softc      *sc_if;
{
        int                     i;
        struct sk_softc         *sc;
        struct sk_txdesc        *txd;
        struct sk_rxdesc        *rxd;
        struct sk_rxdesc        *jrxd;
        struct ifnet            *ifp;
        u_int32_t               val;

        SK_IF_LOCK_ASSERT(sc_if);
        sc = sc_if->sk_softc;
        ifp = sc_if->sk_ifp;

        callout_stop(&sc_if->sk_tick_ch);
        callout_stop(&sc_if->sk_watchdog_ch);

        /* stop Tx descriptor polling timer */
        SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
        /* stop transfer of Tx descriptors */
        CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
        /* Poll until the BMU acknowledges the stop (clears the bit). */
        for (i = 0; i < SK_TIMEOUT; i++) {
                val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
                if ((val & SK_TXBMU_TX_STOP) == 0)
                        break;
                DELAY(1);
        }
        if (i == SK_TIMEOUT)
                device_printf(sc_if->sk_if_dev,
                    "can not stop transfer of Tx descriptor\n");
        /* stop transfer of Rx descriptors */
        SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
        for (i = 0; i < SK_TIMEOUT; i++) {
                val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
                if ((val & SK_RXBMU_RX_STOP) == 0)
                        break;
                DELAY(1);
        }
        if (i == SK_TIMEOUT)
                device_printf(sc_if->sk_if_dev,
                    "can not stop transfer of Rx descriptor\n");

        if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
                /* Put PHY back into reset. */
                val = sk_win_read_4(sc, SK_GPIO);
                if (sc_if->sk_port == SK_PORT_A) {
                        val |= SK_GPIO_DIR0;
                        val &= ~SK_GPIO_DAT0;
                } else {
                        val |= SK_GPIO_DIR2;
                        val &= ~SK_GPIO_DAT2;
                }
                sk_win_write_4(sc, SK_GPIO, val);
        }

        /* Turn off various components of this interface. */
        SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
        switch (sc->sk_type) {
        case SK_GENESIS:
                SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
                SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
                break;
        case SK_YUKON:
        case SK_YUKON_LITE:
        case SK_YUKON_LP:
                SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
                SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
                break;
        }
        /* Take BMUs, RAM buffers, arbiter and LEDs offline. */
        SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
        SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
        SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
        SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
        SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
        SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
        SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
        SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
        SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

        /* Disable interrupts */
        if (sc_if->sk_port == SK_PORT_A)
                sc->sk_intrmask &= ~SK_INTRS1;
        else
                sc->sk_intrmask &= ~SK_INTRS2;
        CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

        /* Ack any pending XMAC interrupts and mask them all. */
        SK_XM_READ_2(sc_if, XM_ISR);
        SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

        /* Free RX and TX mbufs still in the queues. */
        for (i = 0; i < SK_RX_RING_CNT; i++) {
                rxd = &sc_if->sk_cdata.sk_rxdesc[i];
                if (rxd->rx_m != NULL) {
                        bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
                            rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
                            rxd->rx_dmamap);
                        m_freem(rxd->rx_m);
                        rxd->rx_m = NULL;
                }
        }
        for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
                jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
                if (jrxd->rx_m != NULL) {
                        bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
                            jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
                            jrxd->rx_dmamap);
                        m_freem(jrxd->rx_m);
                        jrxd->rx_m = NULL;
                }
        }
        for (i = 0; i < SK_TX_RING_CNT; i++) {
                txd = &sc_if->sk_cdata.sk_txdesc[i];
                if (txd->tx_m != NULL) {
                        bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
                            txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
                            txd->tx_dmamap);
                        m_freem(txd->tx_m);
                        txd->tx_m = NULL;
                }
        }

        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);

        return;
}
 3814 
 3815 static int
 3816 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 3817 {
 3818         int error, value;
 3819 
 3820         if (!arg1)
 3821                 return (EINVAL);
 3822         value = *(int *)arg1;
 3823         error = sysctl_handle_int(oidp, &value, 0, req);
 3824         if (error || !req->newptr)
 3825                 return (error);
 3826         if (value < low || value > high)
 3827                 return (EINVAL);
 3828         *(int *)arg1 = value;
 3829         return (0);
 3830 }
 3831 
 3832 static int
 3833 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
 3834 {
 3835         return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
 3836 }

Cache object: dcb3e959b0047bdfb7b99dad291c6b19


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.