FreeBSD/Linux Kernel Cross Reference
sys/dev/msk/if_msk.c

    1 /******************************************************************************
    2  *
    3  * Name   : sky2.c
    4  * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
    5  * Version: $Revision: 1.23 $
    6  * Date   : $Date: 2005/12/22 09:04:11 $
    7  * Purpose: Main driver source file
    8  *
    9  *****************************************************************************/
   10 
   11 /******************************************************************************
   12  *
   13  *      LICENSE:
   14  *      Copyright (C) Marvell International Ltd. and/or its affiliates
   15  *
   16  *      The computer program files contained in this folder ("Files")
   17  *      are provided to you under the BSD-type license terms provided
   18  *      below, and any use of such Files and any derivative works
   19  *      thereof created by you shall be governed by the following terms
   20  *      and conditions:
   21  *
   22  *      - Redistributions of source code must retain the above copyright
   23  *        notice, this list of conditions and the following disclaimer.
   24  *      - Redistributions in binary form must reproduce the above
   25  *        copyright notice, this list of conditions and the following
   26  *        disclaimer in the documentation and/or other materials provided
   27  *        with the distribution.
   28  *      - Neither the name of Marvell nor the names of its contributors
   29  *        may be used to endorse or promote products derived from this
   30  *        software without specific prior written permission.
   31  *
   32  *      THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   33  *      "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   34  *      LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   35  *      FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   36  *      COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   37  *      INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
   38  *      BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
   39  *      LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   40  *      HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
   41  *      STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   42  *      ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
   43  *      OF THE POSSIBILITY OF SUCH DAMAGE.
   44  *      /LICENSE
   45  *
   46  *****************************************************************************/
   47 
   48 /*-
   49  * Copyright (c) 1997, 1998, 1999, 2000
   50  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
   51  *
   52  * Redistribution and use in source and binary forms, with or without
   53  * modification, are permitted provided that the following conditions
   54  * are met:
   55  * 1. Redistributions of source code must retain the above copyright
   56  *    notice, this list of conditions and the following disclaimer.
   57  * 2. Redistributions in binary form must reproduce the above copyright
   58  *    notice, this list of conditions and the following disclaimer in the
   59  *    documentation and/or other materials provided with the distribution.
   60  * 3. All advertising materials mentioning features or use of this software
   61  *    must display the following acknowledgement:
   62  *      This product includes software developed by Bill Paul.
   63  * 4. Neither the name of the author nor the names of any co-contributors
   64  *    may be used to endorse or promote products derived from this software
   65  *    without specific prior written permission.
   66  *
   67  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   68  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   69  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   70  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   71  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   72  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   73  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   74  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   75  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   76  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   77  * THE POSSIBILITY OF SUCH DAMAGE.
   78  */
   79 /*-
   80  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
   81  *
   82  * Permission to use, copy, modify, and distribute this software for any
   83  * purpose with or without fee is hereby granted, provided that the above
   84  * copyright notice and this permission notice appear in all copies.
   85  *
   86  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   87  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   88  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   89  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   90  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   91  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   92  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
   93  */
   94 
   95 /*
   96  * Device driver for the Marvell Yukon II Ethernet controller.
   97  * Due to lack of documentation, this driver is based on the code from
   98  * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
   99  */
  100 
  101 #include <sys/cdefs.h>
  102 __FBSDID("$FreeBSD: releng/8.0/sys/dev/msk/if_msk.c 195049 2009-06-26 11:45:06Z rwatson $");
  103 
  104 #include <sys/param.h>
  105 #include <sys/systm.h>
  106 #include <sys/bus.h>
  107 #include <sys/endian.h>
  108 #include <sys/mbuf.h>
  109 #include <sys/malloc.h>
  110 #include <sys/kernel.h>
  111 #include <sys/module.h>
  112 #include <sys/socket.h>
  113 #include <sys/sockio.h>
  114 #include <sys/queue.h>
  115 #include <sys/sysctl.h>
  116 #include <sys/taskqueue.h>
  117 
  118 #include <net/bpf.h>
  119 #include <net/ethernet.h>
  120 #include <net/if.h>
  121 #include <net/if_arp.h>
  122 #include <net/if_dl.h>
  123 #include <net/if_media.h>
  124 #include <net/if_types.h>
  125 #include <net/if_vlan_var.h>
  126 
  127 #include <netinet/in.h>
  128 #include <netinet/in_systm.h>
  129 #include <netinet/ip.h>
  130 #include <netinet/tcp.h>
  131 #include <netinet/udp.h>
  132 
  133 #include <machine/bus.h>
  134 #include <machine/in_cksum.h>
  135 #include <machine/resource.h>
  136 #include <sys/rman.h>
  137 
  138 #include <dev/mii/mii.h>
  139 #include <dev/mii/miivar.h>
  140 #include <dev/mii/brgphyreg.h>
  141 
  142 #include <dev/pci/pcireg.h>
  143 #include <dev/pci/pcivar.h>
  144 
  145 #include <dev/msk/if_mskreg.h>
  146 
  147 MODULE_DEPEND(msk, pci, 1, 1, 1);
  148 MODULE_DEPEND(msk, ether, 1, 1, 1);
  149 MODULE_DEPEND(msk, miibus, 1, 1, 1);
  150 
  151 /* "device miibus" required.  See GENERIC if you get errors here. */
  152 #include "miibus_if.h"
  153 
  154 /* Tunables. */
  155 static int msi_disable = 0;
  156 TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
  157 static int legacy_intr = 0;
  158 TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
  159 static int jumbo_disable = 0;
  160 TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
  161 
  162 #define MSK_CSUM_FEATURES       (CSUM_TCP | CSUM_UDP)
  163 
  164 /*
  165  * Devices supported by this driver.
  166  */
  167 static struct msk_product {
  168         uint16_t        msk_vendorid;
  169         uint16_t        msk_deviceid;
  170         const char      *msk_name;
  171 } msk_products[] = {
  172         { VENDORID_SK, DEVICEID_SK_YUKON2,
  173             "SK-9Sxx Gigabit Ethernet" },
  174         { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
  175             "SK-9Exx Gigabit Ethernet"},
  176         { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
  177             "Marvell Yukon 88E8021CU Gigabit Ethernet" },
  178         { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
  179             "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
  180         { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
  181             "Marvell Yukon 88E8022CU Gigabit Ethernet" },
  182         { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
  183             "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
  184         { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
  185             "Marvell Yukon 88E8061CU Gigabit Ethernet" },
  186         { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
  187             "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
  188         { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
  189             "Marvell Yukon 88E8062CU Gigabit Ethernet" },
  190         { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
  191             "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
  192         { VENDORID_MARVELL, DEVICEID_MRVL_8035,
  193             "Marvell Yukon 88E8035 Fast Ethernet" },
  194         { VENDORID_MARVELL, DEVICEID_MRVL_8036,
  195             "Marvell Yukon 88E8036 Fast Ethernet" },
  196         { VENDORID_MARVELL, DEVICEID_MRVL_8038,
  197             "Marvell Yukon 88E8038 Fast Ethernet" },
  198         { VENDORID_MARVELL, DEVICEID_MRVL_8039,
  199             "Marvell Yukon 88E8039 Fast Ethernet" },
  200         { VENDORID_MARVELL, DEVICEID_MRVL_8040,
  201             "Marvell Yukon 88E8040 Fast Ethernet" },
  202         { VENDORID_MARVELL, DEVICEID_MRVL_8040T,
  203             "Marvell Yukon 88E8040T Fast Ethernet" },
  204         { VENDORID_MARVELL, DEVICEID_MRVL_8048,
  205             "Marvell Yukon 88E8048 Fast Ethernet" },
  206         { VENDORID_MARVELL, DEVICEID_MRVL_4361,
  207             "Marvell Yukon 88E8050 Gigabit Ethernet" },
  208         { VENDORID_MARVELL, DEVICEID_MRVL_4360,
  209             "Marvell Yukon 88E8052 Gigabit Ethernet" },
  210         { VENDORID_MARVELL, DEVICEID_MRVL_4362,
  211             "Marvell Yukon 88E8053 Gigabit Ethernet" },
  212         { VENDORID_MARVELL, DEVICEID_MRVL_4363,
  213             "Marvell Yukon 88E8055 Gigabit Ethernet" },
  214         { VENDORID_MARVELL, DEVICEID_MRVL_4364,
  215             "Marvell Yukon 88E8056 Gigabit Ethernet" },
  216         { VENDORID_MARVELL, DEVICEID_MRVL_4365,
  217             "Marvell Yukon 88E8070 Gigabit Ethernet" },
  218         { VENDORID_MARVELL, DEVICEID_MRVL_436A,
  219             "Marvell Yukon 88E8058 Gigabit Ethernet" },
  220         { VENDORID_MARVELL, DEVICEID_MRVL_436B,
  221             "Marvell Yukon 88E8071 Gigabit Ethernet" },
  222         { VENDORID_MARVELL, DEVICEID_MRVL_436C,
  223             "Marvell Yukon 88E8072 Gigabit Ethernet" },
  224         { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
  225             "D-Link 550SX Gigabit Ethernet" },
  226         { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
  227             "D-Link 560T Gigabit Ethernet" }
  228 };
  229 
  230 static const char *model_name[] = {
  231         "Yukon XL",
  232         "Yukon EC Ultra",
  233         "Yukon EX",
  234         "Yukon EC",
  235         "Yukon FE",
  236         "Yukon FE+"
  237 };
  238 
  239 static int mskc_probe(device_t);
  240 static int mskc_attach(device_t);
  241 static int mskc_detach(device_t);
  242 static int mskc_shutdown(device_t);
  243 static int mskc_setup_rambuffer(struct msk_softc *);
  244 static int mskc_suspend(device_t);
  245 static int mskc_resume(device_t);
  246 static void mskc_reset(struct msk_softc *);
  247 
  248 static int msk_probe(device_t);
  249 static int msk_attach(device_t);
  250 static int msk_detach(device_t);
  251 
  252 static void msk_tick(void *);
  253 static void msk_legacy_intr(void *);
  254 static int msk_intr(void *);
  255 static void msk_int_task(void *, int);
  256 static void msk_intr_phy(struct msk_if_softc *);
  257 static void msk_intr_gmac(struct msk_if_softc *);
  258 static __inline void msk_rxput(struct msk_if_softc *);
  259 static int msk_handle_events(struct msk_softc *);
  260 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
  261 static void msk_intr_hwerr(struct msk_softc *);
  262 #ifndef __NO_STRICT_ALIGNMENT
  263 static __inline void msk_fixup_rx(struct mbuf *);
  264 #endif
  265 static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
  266 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
  267 static void msk_txeof(struct msk_if_softc *, int);
  268 static int msk_encap(struct msk_if_softc *, struct mbuf **);
  269 static void msk_tx_task(void *, int);
  270 static void msk_start(struct ifnet *);
  271 static int msk_ioctl(struct ifnet *, u_long, caddr_t);
  272 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
  273 static void msk_set_rambuffer(struct msk_if_softc *);
  274 static void msk_set_tx_stfwd(struct msk_if_softc *);
  275 static void msk_init(void *);
  276 static void msk_init_locked(struct msk_if_softc *);
  277 static void msk_stop(struct msk_if_softc *);
  278 static void msk_watchdog(struct msk_if_softc *);
  279 static int msk_mediachange(struct ifnet *);
  280 static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
  281 static void msk_phy_power(struct msk_softc *, int);
  282 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  283 static int msk_status_dma_alloc(struct msk_softc *);
  284 static void msk_status_dma_free(struct msk_softc *);
  285 static int msk_txrx_dma_alloc(struct msk_if_softc *);
  286 static int msk_rx_dma_jalloc(struct msk_if_softc *);
  287 static void msk_txrx_dma_free(struct msk_if_softc *);
  288 static void msk_rx_dma_jfree(struct msk_if_softc *);
  289 static int msk_init_rx_ring(struct msk_if_softc *);
  290 static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
  291 static void msk_init_tx_ring(struct msk_if_softc *);
  292 static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
  293 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
  294 static int msk_newbuf(struct msk_if_softc *, int);
  295 static int msk_jumbo_newbuf(struct msk_if_softc *, int);
  296 
  297 static int msk_phy_readreg(struct msk_if_softc *, int, int);
  298 static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
  299 static int msk_miibus_readreg(device_t, int, int);
  300 static int msk_miibus_writereg(device_t, int, int, int);
  301 static void msk_miibus_statchg(device_t);
  302 
  303 static void msk_rxfilter(struct msk_if_softc *);
  304 static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
  305 
  306 static void msk_stats_clear(struct msk_if_softc *);
  307 static void msk_stats_update(struct msk_if_softc *);
  308 static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
  309 static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
  310 static void msk_sysctl_node(struct msk_if_softc *);
  311 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
  312 static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
  313 
  314 static device_method_t mskc_methods[] = {
  315         /* Device interface */
  316         DEVMETHOD(device_probe,         mskc_probe),
  317         DEVMETHOD(device_attach,        mskc_attach),
  318         DEVMETHOD(device_detach,        mskc_detach),
  319         DEVMETHOD(device_suspend,       mskc_suspend),
  320         DEVMETHOD(device_resume,        mskc_resume),
  321         DEVMETHOD(device_shutdown,      mskc_shutdown),
  322 
  323         /* bus interface */
  324         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  325         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  326 
  327         { NULL, NULL }
  328 };
  329 
  330 static driver_t mskc_driver = {
  331         "mskc",
  332         mskc_methods,
  333         sizeof(struct msk_softc)
  334 };
  335 
  336 static devclass_t mskc_devclass;
  337 
  338 static device_method_t msk_methods[] = {
  339         /* Device interface */
  340         DEVMETHOD(device_probe,         msk_probe),
  341         DEVMETHOD(device_attach,        msk_attach),
  342         DEVMETHOD(device_detach,        msk_detach),
  343         DEVMETHOD(device_shutdown,      bus_generic_shutdown),
  344 
  345         /* bus interface */
  346         DEVMETHOD(bus_print_child,      bus_generic_print_child),
  347         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
  348 
  349         /* MII interface */
  350         DEVMETHOD(miibus_readreg,       msk_miibus_readreg),
  351         DEVMETHOD(miibus_writereg,      msk_miibus_writereg),
  352         DEVMETHOD(miibus_statchg,       msk_miibus_statchg),
  353 
  354         { NULL, NULL }
  355 };
  356 
  357 static driver_t msk_driver = {
  358         "msk",
  359         msk_methods,
  360         sizeof(struct msk_if_softc)
  361 };
  362 
  363 static devclass_t msk_devclass;
  364 
  365 DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
  366 DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
  367 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
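       /*
        * Note: the module declarations above form a three-level hierarchy:
        * mskc attaches to the PCI device and owns the shared controller
        * resources, one msk child is created per port, and each msk
        * instance hosts a miibus for its PHY.
        */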
  368 
  369 static struct resource_spec msk_res_spec_io[] = {
  370         { SYS_RES_IOPORT,       PCIR_BAR(1),    RF_ACTIVE },
  371         { -1,                   0,              0 }
  372 };
  373 
  374 static struct resource_spec msk_res_spec_mem[] = {
  375         { SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
  376         { -1,                   0,              0 }
  377 };
  378 
  379 static struct resource_spec msk_irq_spec_legacy[] = {
  380         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  381         { -1,                   0,              0 }
  382 };
  383 
  384 static struct resource_spec msk_irq_spec_msi[] = {
  385         { SYS_RES_IRQ,          1,              RF_ACTIVE },
  386         { -1,                   0,              0 }
  387 };
  388 
  389 static struct resource_spec msk_irq_spec_msi2[] = {
  390         { SYS_RES_IRQ,          1,              RF_ACTIVE },
  391         { SYS_RES_IRQ,          2,              RF_ACTIVE },
  392         { -1,                   0,              0 }
  393 };
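       /*
        * Note: the register block may be mapped through memory BAR 0 or
        * I/O BAR 1.  The IRQ specs cover a shareable legacy INTx line
        * (rid 0), a single MSI vector (rid 1) and a dual MSI
        * configuration (rids 1 and 2).
        */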
  394 
  395 static int
  396 msk_miibus_readreg(device_t dev, int phy, int reg)
  397 {
  398         struct msk_if_softc *sc_if;
  399 
  400         if (phy != PHY_ADDR_MARV)
  401                 return (0);
  402 
  403         sc_if = device_get_softc(dev);
  404 
  405         return (msk_phy_readreg(sc_if, phy, reg));
  406 }
  407 
  408 static int
  409 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
  410 {
  411         struct msk_softc *sc;
  412         int i, val;
  413 
  414         sc = sc_if->msk_softc;
  415 
  416         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
  417             GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
  418 
  419         for (i = 0; i < MSK_TIMEOUT; i++) {
  420                 DELAY(1);
  421                 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
  422                 if ((val & GM_SMI_CT_RD_VAL) != 0) {
  423                         val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
  424                         break;
  425                 }
  426         }
  427 
  428         if (i == MSK_TIMEOUT) {
  429                 if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
  430                 val = 0;
  431         }
  432 
  433         return (val);
  434 }
  435 
  436 static int
  437 msk_miibus_writereg(device_t dev, int phy, int reg, int val)
  438 {
  439         struct msk_if_softc *sc_if;
  440 
  441         if (phy != PHY_ADDR_MARV)
  442                 return (0);
  443 
  444         sc_if = device_get_softc(dev);
  445 
  446         return (msk_phy_writereg(sc_if, phy, reg, val));
  447 }
  448 
  449 static int
  450 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
  451 {
  452         struct msk_softc *sc;
  453         int i;
  454 
  455         sc = sc_if->msk_softc;
  456 
  457         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
  458         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
  459             GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
  460         for (i = 0; i < MSK_TIMEOUT; i++) {
  461                 DELAY(1);
  462                 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
  463                     GM_SMI_CT_BUSY) == 0)
  464                         break;
  465         }
  466         if (i == MSK_TIMEOUT)
  467                 if_printf(sc_if->msk_ifp, "phy write timeout\n");
  468 
  469         return (0);
  470 }
  471 
  472 static void
  473 msk_miibus_statchg(device_t dev)
  474 {
  475         struct msk_softc *sc;
  476         struct msk_if_softc *sc_if;
  477         struct mii_data *mii;
  478         struct ifnet *ifp;
  479         uint32_t gmac;
  480 
  481         sc_if = device_get_softc(dev);
  482         sc = sc_if->msk_softc;
  483 
  484         MSK_IF_LOCK_ASSERT(sc_if);
  485 
  486         mii = device_get_softc(sc_if->msk_miibus);
  487         ifp = sc_if->msk_ifp;
  488         if (mii == NULL || ifp == NULL ||
  489             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
  490                 return;
  491 
  492         sc_if->msk_flags &= ~MSK_FLAG_LINK;
  493         if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
  494             (IFM_AVALID | IFM_ACTIVE)) {
  495                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
  496                 case IFM_10_T:
  497                 case IFM_100_TX:
  498                         sc_if->msk_flags |= MSK_FLAG_LINK;
  499                         break;
  500                 case IFM_1000_T:
  501                 case IFM_1000_SX:
  502                 case IFM_1000_LX:
  503                 case IFM_1000_CX:
  504                         if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
  505                                 sc_if->msk_flags |= MSK_FLAG_LINK;
  506                         break;
  507                 default:
  508                         break;
  509                 }
  510         }
  511 
  512         if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
  513                 /* Enable Tx FIFO Underrun. */
  514                 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
  515                     GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
  516                 /*
  517                  * Because mii(4) notify msk(4) that it detected link status
  518                  * change, there is no need to enable automatic
  519                  * speed/flow-control/duplex updates.
  520                  */
  521                 gmac = GM_GPCR_AU_ALL_DIS;
  522                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
  523                 case IFM_1000_SX:
  524                 case IFM_1000_T:
  525                         gmac |= GM_GPCR_SPEED_1000;
  526                         break;
  527                 case IFM_100_TX:
  528                         gmac |= GM_GPCR_SPEED_100;
  529                         break;
  530                 case IFM_10_T:
  531                         break;
  532                 }
  533 
  534                 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
  535                         gmac |= GM_GPCR_DUP_FULL;
  536                 /* Disable Rx flow control. */
  537                 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
  538                         gmac |= GM_GPCR_FC_RX_DIS;
  539                 /* Disable Tx flow control. */
  540                 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
  541                         gmac |= GM_GPCR_FC_TX_DIS;
  542                 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
  543                 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
  544                 /* Read again to ensure writing. */
  545                 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
  546 
  547                 gmac = GMC_PAUSE_ON;
  548                 if (((mii->mii_media_active & IFM_GMASK) &
  549                     (IFM_FLAG0 | IFM_FLAG1)) == 0)
  550                         gmac = GMC_PAUSE_OFF;
  551                 /* Diable pause for 10/100 Mbps in half-duplex mode. */
  552                 if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
  553                     (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
  554                     IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
  555                         gmac = GMC_PAUSE_OFF;
  556                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
  557 
  558                 /* Enable PHY interrupt for FIFO underrun/overflow. */
  559                 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
  560                     PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
  561         } else {
  562                 /*
  563                  * Link state changed to down.
  564                  * Disable PHY interrupts.
  565                  */
  566                 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
  567                 /* Disable Rx/Tx MAC. */
  568                 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
  569                 if ((GM_GPCR_RX_ENA | GM_GPCR_TX_ENA) != 0) {
  570                         gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
  571                         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
  572                         /* Read again to ensure writing. */
  573                         GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
  574                 }
  575         }
  576 }
  577 
  578 static void
  579 msk_rxfilter(struct msk_if_softc *sc_if)
  580 {
  581         struct msk_softc *sc;
  582         struct ifnet *ifp;
  583         struct ifmultiaddr *ifma;
  584         uint32_t mchash[2];
  585         uint32_t crc;
  586         uint16_t mode;
  587 
  588         sc = sc_if->msk_softc;
  589 
  590         MSK_IF_LOCK_ASSERT(sc_if);
  591 
  592         ifp = sc_if->msk_ifp;
  593 
  594         bzero(mchash, sizeof(mchash));
  595         mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
  596         if ((ifp->if_flags & IFF_PROMISC) != 0)
  597                 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
  598         else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
  599                 mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
  600                 mchash[0] = 0xffff;
  601                 mchash[1] = 0xffff;
  602         } else {
  603                 mode |= GM_RXCR_UCF_ENA;
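                       /*
                        * Note: the GMAC hashes a multicast address to the
                        * low 6 bits of its big-endian CRC-32; hash value N
                        * sets bit N of the 64-bit filter that is written to
                        * GM_MC_ADDR_H1..GM_MC_ADDR_H4 below.
                        */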
  604                 if_maddr_rlock(ifp);
  605                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  606                         if (ifma->ifma_addr->sa_family != AF_LINK)
  607                                 continue;
  608                         crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  609                             ifma->ifma_addr), ETHER_ADDR_LEN);
  610                         /* Just want the 6 least significant bits. */
  611                         crc &= 0x3f;
  612                         /* Set the corresponding bit in the hash table. */
  613                         mchash[crc >> 5] |= 1 << (crc & 0x1f);
  614                 }
  615                 if_maddr_runlock(ifp);
  616                 if (mchash[0] != 0 || mchash[1] != 0)
  617                         mode |= GM_RXCR_MCF_ENA;
  618         }
  619 
  620         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
  621             mchash[0] & 0xffff);
  622         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
  623             (mchash[0] >> 16) & 0xffff);
  624         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
  625             mchash[1] & 0xffff);
  626         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
  627             (mchash[1] >> 16) & 0xffff);
  628         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
  629 }
  630 
  631 static void
  632 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
  633 {
  634         struct msk_softc *sc;
  635 
  636         sc = sc_if->msk_softc;
  637         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
  638                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
  639                     RX_VLAN_STRIP_ON);
  640                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
  641                     TX_VLAN_TAG_ON);
  642         } else {
  643                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
  644                     RX_VLAN_STRIP_OFF);
  645                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
  646                     TX_VLAN_TAG_OFF);
  647         }
  648 }
  649 
  650 static int
  651 msk_init_rx_ring(struct msk_if_softc *sc_if)
  652 {
  653         struct msk_ring_data *rd;
  654         struct msk_rxdesc *rxd;
  655         int i, prod;
  656 
  657         MSK_IF_LOCK_ASSERT(sc_if);
  658 
  659         sc_if->msk_cdata.msk_rx_cons = 0;
  660         sc_if->msk_cdata.msk_rx_prod = 0;
  661         sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
  662 
  663         rd = &sc_if->msk_rdata;
  664         bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
  665         prod = sc_if->msk_cdata.msk_rx_prod;
  666         for (i = 0; i < MSK_RX_RING_CNT; i++) {
  667                 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
  668                 rxd->rx_m = NULL;
  669                 rxd->rx_le = &rd->msk_rx_ring[prod];
  670                 if (msk_newbuf(sc_if, prod) != 0)
  671                         return (ENOBUFS);
  672                 MSK_INC(prod, MSK_RX_RING_CNT);
  673         }
  674 
  675         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
  676             sc_if->msk_cdata.msk_rx_ring_map,
  677             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  678 
  679         /* Update prefetch unit. */
  680         sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
  681         CSR_WRITE_2(sc_if->msk_softc,
  682             Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
  683             sc_if->msk_cdata.msk_rx_prod);
  684 
  685         return (0);
  686 }
  687 
  688 static int
  689 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
  690 {
  691         struct msk_ring_data *rd;
  692         struct msk_rxdesc *rxd;
  693         int i, prod;
  694 
  695         MSK_IF_LOCK_ASSERT(sc_if);
  696 
  697         sc_if->msk_cdata.msk_rx_cons = 0;
  698         sc_if->msk_cdata.msk_rx_prod = 0;
  699         sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
  700 
  701         rd = &sc_if->msk_rdata;
  702         bzero(rd->msk_jumbo_rx_ring,
  703             sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
  704         prod = sc_if->msk_cdata.msk_rx_prod;
  705         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
  706                 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
  707                 rxd->rx_m = NULL;
  708                 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
  709                 if (msk_jumbo_newbuf(sc_if, prod) != 0)
  710                         return (ENOBUFS);
  711                 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
  712         }
  713 
  714         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
  715             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
  716             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  717 
  718         sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
  719         CSR_WRITE_2(sc_if->msk_softc,
  720             Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
  721             sc_if->msk_cdata.msk_rx_prod);
  722 
  723         return (0);
  724 }
  725 
  726 static void
  727 msk_init_tx_ring(struct msk_if_softc *sc_if)
  728 {
  729         struct msk_ring_data *rd;
  730         struct msk_txdesc *txd;
  731         int i;
  732 
  733         sc_if->msk_cdata.msk_tso_mtu = 0;
  734         sc_if->msk_cdata.msk_tx_prod = 0;
  735         sc_if->msk_cdata.msk_tx_cons = 0;
  736         sc_if->msk_cdata.msk_tx_cnt = 0;
  737 
  738         rd = &sc_if->msk_rdata;
  739         bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
  740         for (i = 0; i < MSK_TX_RING_CNT; i++) {
  741                 txd = &sc_if->msk_cdata.msk_txdesc[i];
  742                 txd->tx_m = NULL;
  743                 txd->tx_le = &rd->msk_tx_ring[i];
  744         }
  745 
  746         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
  747             sc_if->msk_cdata.msk_tx_ring_map,
  748             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  749 }
  750 
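       /*
        * Note: the discard helpers below recycle the mbuf already attached
        * to a ring slot by handing its list element back to the hardware
        * (OP_PACKET | HW_OWNER).  The receive path uses them to drop a
        * frame while keeping the ring full when a replacement buffer
        * cannot be allocated.
        */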
  751 static __inline void
  752 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
  753 {
  754         struct msk_rx_desc *rx_le;
  755         struct msk_rxdesc *rxd;
  756         struct mbuf *m;
  757 
  758         rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  759         m = rxd->rx_m;
  760         rx_le = rxd->rx_le;
  761         rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
  762 }
  763 
  764 static __inline void
  765 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
  766 {
  767         struct msk_rx_desc *rx_le;
  768         struct msk_rxdesc *rxd;
  769         struct mbuf *m;
  770 
  771         rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
  772         m = rxd->rx_m;
  773         rx_le = rxd->rx_le;
  774         rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
  775 }
  776 
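       /*
        * Note: msk_newbuf() loads the new mbuf into the spare DMA map first
        * and swaps maps only after a successful load, so an allocation or
        * mapping failure leaves the ring slot's current mbuf and mapping
        * intact.
        */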
  777 static int
  778 msk_newbuf(struct msk_if_softc *sc_if, int idx)
  779 {
  780         struct msk_rx_desc *rx_le;
  781         struct msk_rxdesc *rxd;
  782         struct mbuf *m;
  783         bus_dma_segment_t segs[1];
  784         bus_dmamap_t map;
  785         int nsegs;
  786 
  787         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
  788         if (m == NULL)
  789                 return (ENOBUFS);
  790 
  791         m->m_len = m->m_pkthdr.len = MCLBYTES;
  792         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
  793                 m_adj(m, ETHER_ALIGN);
  794 #ifndef __NO_STRICT_ALIGNMENT
  795         else
  796                 m_adj(m, MSK_RX_BUF_ALIGN);
  797 #endif
  798 
  799         if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
  800             sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
  801             BUS_DMA_NOWAIT) != 0) {
  802                 m_freem(m);
  803                 return (ENOBUFS);
  804         }
  805         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
  806 
  807         rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  808         if (rxd->rx_m != NULL) {
  809                 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
  810                     BUS_DMASYNC_POSTREAD);
  811                 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
  812         }
  813         map = rxd->rx_dmamap;
  814         rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
  815         sc_if->msk_cdata.msk_rx_sparemap = map;
  816         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
  817             BUS_DMASYNC_PREREAD);
  818         rxd->rx_m = m;
  819         rx_le = rxd->rx_le;
  820         rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
  821         rx_le->msk_control =
  822             htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
  823 
  824         return (0);
  825 }
  826 
  827 static int
  828 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
  829 {
  830         struct msk_rx_desc *rx_le;
  831         struct msk_rxdesc *rxd;
  832         struct mbuf *m;
  833         bus_dma_segment_t segs[1];
  834         bus_dmamap_t map;
  835         int nsegs;
  836 
  837         m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
  838         if (m == NULL)
  839                 return (ENOBUFS);
  840         if ((m->m_flags & M_EXT) == 0) {
  841                 m_freem(m);
  842                 return (ENOBUFS);
  843         }
  844         m->m_len = m->m_pkthdr.len = MJUM9BYTES;
  845         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
  846                 m_adj(m, ETHER_ALIGN);
  847 #ifndef __NO_STRICT_ALIGNMENT
  848         else
  849                 m_adj(m, MSK_RX_BUF_ALIGN);
  850 #endif
  851 
  852         if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
  853             sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
  854             BUS_DMA_NOWAIT) != 0) {
  855                 m_freem(m);
  856                 return (ENOBUFS);
  857         }
  858         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
  859 
  860         rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
  861         if (rxd->rx_m != NULL) {
  862                 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
  863                     rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
  864                 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
  865                     rxd->rx_dmamap);
  866         }
  867         map = rxd->rx_dmamap;
  868         rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
  869         sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
  870         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
  871             BUS_DMASYNC_PREREAD);
  872         rxd->rx_m = m;
  873         rx_le = rxd->rx_le;
  874         rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
  875         rx_le->msk_control =
  876             htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
  877 
  878         return (0);
  879 }
  880 
  881 /*
  882  * Set media options.
  883  */
  884 static int
  885 msk_mediachange(struct ifnet *ifp)
  886 {
  887         struct msk_if_softc *sc_if;
  888         struct mii_data *mii;
  889         int error;
  890 
  891         sc_if = ifp->if_softc;
  892 
  893         MSK_IF_LOCK(sc_if);
  894         mii = device_get_softc(sc_if->msk_miibus);
  895         error = mii_mediachg(mii);
  896         MSK_IF_UNLOCK(sc_if);
  897 
  898         return (error);
  899 }
  900 
  901 /*
  902  * Report current media status.
  903  */
  904 static void
  905 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
  906 {
  907         struct msk_if_softc *sc_if;
  908         struct mii_data *mii;
  909 
  910         sc_if = ifp->if_softc;
  911         MSK_IF_LOCK(sc_if);
  912         if ((ifp->if_flags & IFF_UP) == 0) {
  913                 MSK_IF_UNLOCK(sc_if);
  914                 return;
  915         }
  916         mii = device_get_softc(sc_if->msk_miibus);
  917 
  918         mii_pollstat(mii);
  919         MSK_IF_UNLOCK(sc_if);
  920         ifmr->ifm_active = mii->mii_media_active;
  921         ifmr->ifm_status = mii->mii_media_status;
  922 }
  923 
  924 static int
  925 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
  926 {
  927         struct msk_if_softc *sc_if;
  928         struct ifreq *ifr;
  929         struct mii_data *mii;
  930         int error, mask;
  931 
  932         sc_if = ifp->if_softc;
  933         ifr = (struct ifreq *)data;
  934         error = 0;
  935 
  936         switch(command) {
  937         case SIOCSIFMTU:
  938                 MSK_IF_LOCK(sc_if);
  939                 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
  940                         error = EINVAL;
  941                 else if (ifp->if_mtu != ifr->ifr_mtu) {
  942                         if (ifr->ifr_mtu > ETHERMTU) {
  943                                 if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
  944                                         error = EINVAL;
  945                                         MSK_IF_UNLOCK(sc_if);
  946                                         break;
  947                                 }
  948                                 if ((sc_if->msk_flags &
  949                                     MSK_FLAG_JUMBO_NOCSUM) != 0) {
  950                                         ifp->if_hwassist &=
  951                                             ~(MSK_CSUM_FEATURES | CSUM_TSO);
  952                                         ifp->if_capenable &=
  953                                             ~(IFCAP_TSO4 | IFCAP_TXCSUM);
  954                                         VLAN_CAPABILITIES(ifp);
  955                                 }
  956                         }
  957                         ifp->if_mtu = ifr->ifr_mtu;
  958                         msk_init_locked(sc_if);
  959                 }
  960                 MSK_IF_UNLOCK(sc_if);
  961                 break;
  962         case SIOCSIFFLAGS:
  963                 MSK_IF_LOCK(sc_if);
  964                 if ((ifp->if_flags & IFF_UP) != 0) {
  965                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
  966                             ((ifp->if_flags ^ sc_if->msk_if_flags) &
  967                             (IFF_PROMISC | IFF_ALLMULTI)) != 0)
  968                                 msk_rxfilter(sc_if);
  969                         else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
  970                                 msk_init_locked(sc_if);
  971                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
  972                         msk_stop(sc_if);
  973                 sc_if->msk_if_flags = ifp->if_flags;
  974                 MSK_IF_UNLOCK(sc_if);
  975                 break;
  976         case SIOCADDMULTI:
  977         case SIOCDELMULTI:
  978                 MSK_IF_LOCK(sc_if);
  979                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
  980                         msk_rxfilter(sc_if);
  981                 MSK_IF_UNLOCK(sc_if);
  982                 break;
  983         case SIOCGIFMEDIA:
  984         case SIOCSIFMEDIA:
  985                 mii = device_get_softc(sc_if->msk_miibus);
  986                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
  987                 break;
  988         case SIOCSIFCAP:
  989                 MSK_IF_LOCK(sc_if);
  990                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
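                       /*
                        * Note: each set bit in mask marks a capability whose
                        * requested state differs from the current state; the
                        * XOR statements below toggle exactly those
                        * capabilities.
                        */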
  991                 if ((mask & IFCAP_TXCSUM) != 0 &&
  992                     (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
  993                         ifp->if_capenable ^= IFCAP_TXCSUM;
  994                         if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
  995                                 ifp->if_hwassist |= MSK_CSUM_FEATURES;
  996                         else
  997                                 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
  998                 }
  999                 if ((mask & IFCAP_RXCSUM) != 0 &&
 1000                     (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
 1001                         ifp->if_capenable ^= IFCAP_RXCSUM;
 1002                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 1003                     (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
 1004                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 1005                         msk_setvlan(sc_if, ifp);
 1006                 }
 1007                 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
 1008                     (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
 1009                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 1010                 if ((mask & IFCAP_TSO4) != 0 &&
 1011                     (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
 1012                         ifp->if_capenable ^= IFCAP_TSO4;
 1013                         if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
 1014                                 ifp->if_hwassist |= CSUM_TSO;
 1015                         else
 1016                                 ifp->if_hwassist &= ~CSUM_TSO;
 1017                 }
 1018                 if (ifp->if_mtu > ETHERMTU &&
 1019                     (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
 1020                         ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
 1021                         ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
 1022                 }
 1023 
 1024                 VLAN_CAPABILITIES(ifp);
 1025                 MSK_IF_UNLOCK(sc_if);
 1026                 break;
 1027         default:
 1028                 error = ether_ioctl(ifp, command, data);
 1029                 break;
 1030         }
 1031 
 1032         return (error);
 1033 }
 1034 
 1035 static int
 1036 mskc_probe(device_t dev)
 1037 {
 1038         struct msk_product *mp;
 1039         uint16_t vendor, devid;
 1040         int i;
 1041 
 1042         vendor = pci_get_vendor(dev);
 1043         devid = pci_get_device(dev);
 1044         mp = msk_products;
 1045         for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
 1046             i++, mp++) {
 1047                 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
 1048                         device_set_desc(dev, mp->msk_name);
 1049                         return (BUS_PROBE_DEFAULT);
 1050                 }
 1051         }
 1052 
 1053         return (ENXIO);
 1054 }
 1055 
 1056 static int
 1057 mskc_setup_rambuffer(struct msk_softc *sc)
 1058 {
 1059         int next;
 1060         int i;
 1061 
 1062         /* Get adapter SRAM size. */
 1063         sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
 1064         if (bootverbose)
 1065                 device_printf(sc->msk_dev,
 1066                     "RAM buffer size : %dKB\n", sc->msk_ramsize);
 1067         if (sc->msk_ramsize == 0)
 1068                 return (0);
 1069 
 1070         sc->msk_pflags |= MSK_FLAG_RAMBUF;
 1071         /*
  1072          * Give the receiver 2/3 of the memory and round down to a
  1073          * multiple of 1024. The Tx/Rx RAM buffer sizes of the Yukon II
  1074          * should be multiples of 1024.
 1075          */
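               /*
                * Worked example (illustrative values): with a 48KB RAM
                * buffer, msk_rxqsize = rounddown(48 * 1024 * 2 / 3, 1024) =
                * 32768 bytes and msk_txqsize = 49152 - 32768 = 16384 bytes.
                */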
 1076         sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
 1077         sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
 1078         for (i = 0, next = 0; i < sc->msk_num_port; i++) {
 1079                 sc->msk_rxqstart[i] = next;
 1080                 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
 1081                 next = sc->msk_rxqend[i] + 1;
 1082                 sc->msk_txqstart[i] = next;
 1083                 sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
 1084                 next = sc->msk_txqend[i] + 1;
 1085                 if (bootverbose) {
 1086                         device_printf(sc->msk_dev,
 1087                             "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
 1088                             sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
 1089                             sc->msk_rxqend[i]);
 1090                         device_printf(sc->msk_dev,
 1091                             "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
 1092                             sc->msk_txqsize / 1024, sc->msk_txqstart[i],
 1093                             sc->msk_txqend[i]);
 1094                 }
 1095         }
 1096 
 1097         return (0);
 1098 }
 1099 
 1100 static void
 1101 msk_phy_power(struct msk_softc *sc, int mode)
 1102 {
 1103         uint32_t our, val;
 1104         int i;
 1105 
 1106         switch (mode) {
 1107         case MSK_PHY_POWERUP:
  1108                 /* Switch power to VCC (workaround for VAUX problem). */
 1109                 CSR_WRITE_1(sc, B0_POWER_CTRL,
 1110                     PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
 1111                 /* Disable Core Clock Division, set Clock Select to 0. */
 1112                 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
 1113 
 1114                 val = 0;
 1115                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1116                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1117                         /* Enable bits are inverted. */
 1118                         val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 1119                               Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 1120                               Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
 1121                 }
 1122                 /*
 1123                  * Enable PCI & Core Clock, enable clock gating for both Links.
 1124                  */
 1125                 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
 1126 
 1127                 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
 1128                 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
 1129                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
 1130                         if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1131                                 /* Deassert Low Power for 1st PHY. */
 1132                                 val |= PCI_Y2_PHY1_COMA;
 1133                                 if (sc->msk_num_port > 1)
 1134                                         val |= PCI_Y2_PHY2_COMA;
 1135                         }
 1136                 }
 1137                 /* Release PHY from PowerDown/COMA mode. */
 1138                 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
 1139                 switch (sc->msk_hw_id) {
 1140                 case CHIP_ID_YUKON_EC_U:
 1141                 case CHIP_ID_YUKON_EX:
 1142                 case CHIP_ID_YUKON_FE_P:
 1143                         CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_OFF);
 1144 
 1145                         /* Enable all clocks. */
 1146                         pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
 1147                         our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
 1148                         our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
 1149                             PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
 1150                         /* Set all bits to 0 except bits 15..12. */
 1151                         pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
 1152                         our = pci_read_config(sc->msk_dev, PCI_OUR_REG_5, 4);
 1153                         our &= PCI_CTL_TIM_VMAIN_AV_MSK;
 1154                         pci_write_config(sc->msk_dev, PCI_OUR_REG_5, our, 4);
 1155                         pci_write_config(sc->msk_dev, PCI_CFG_REG_1, 0, 4);
 1156                         /*
 1157                          * Disable status race, workaround for
 1158                          * Yukon EC Ultra & Yukon EX.
 1159                          */
 1160                         val = CSR_READ_4(sc, B2_GP_IO);
 1161                         val |= GLB_GPIO_STAT_RACE_DIS;
 1162                         CSR_WRITE_4(sc, B2_GP_IO, val);
 1163                         CSR_READ_4(sc, B2_GP_IO);
 1164                         break;
 1165                 default:
 1166                         break;
 1167                 }
 1168                 for (i = 0; i < sc->msk_num_port; i++) {
 1169                         CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
 1170                             GMLC_RST_SET);
 1171                         CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
 1172                             GMLC_RST_CLR);
 1173                 }
 1174                 break;
 1175         case MSK_PHY_POWERDOWN:
 1176                 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
 1177                 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
 1178                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1179                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1180                         val &= ~PCI_Y2_PHY1_COMA;
 1181                         if (sc->msk_num_port > 1)
 1182                                 val &= ~PCI_Y2_PHY2_COMA;
 1183                 }
 1184                 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
 1185 
 1186                 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 1187                       Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 1188                       Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
 1189                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1190                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1191                         /* Enable bits are inverted. */
 1192                         val = 0;
 1193                 }
 1194                 /*
 1195                  * Disable PCI & Core Clock, disable clock gating for
 1196                  * both Links.
 1197                  */
 1198                 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
 1199                 CSR_WRITE_1(sc, B0_POWER_CTRL,
 1200                     PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
 1201                 break;
 1202         default:
 1203                 break;
 1204         }
 1205 }
 1206 
 1207 static void
 1208 mskc_reset(struct msk_softc *sc)
 1209 {
 1210         bus_addr_t addr;
 1211         uint16_t status;
 1212         uint32_t val;
 1213         int i;
 1214 
 1215         CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1216 
 1217         /* Disable ASF. */
 1218         if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
 1219                 status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
 1220                 /* Clear AHB bridge & microcontroller reset. */
 1221                 status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
 1222                     Y2_ASF_HCU_CCSR_CPU_RST_MODE);
 1223                 /* Clear ASF microcontroller state. */
  1224                 status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
 1225                 CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
 1226         } else
 1227                 CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
 1228         CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
 1229 
 1230         /*
 1231          * Since we disabled ASF, S/W reset is required for Power Management.
 1232          */
 1233         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 1234         CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1235 
 1236         /* Clear all error bits in the PCI status register. */
 1237         status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 1238         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1239 
 1240         pci_write_config(sc->msk_dev, PCIR_STATUS, status |
 1241             PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 1242             PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
 1243         CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
 1244 
 1245         switch (sc->msk_bustype) {
 1246         case MSK_PEX_BUS:
 1247                 /* Clear all PEX errors. */
 1248                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 1249                 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 1250                 if ((val & PEX_RX_OV) != 0) {
 1251                         sc->msk_intrmask &= ~Y2_IS_HW_ERR;
 1252                         sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 1253                 }
 1254                 break;
 1255         case MSK_PCI_BUS:
 1256         case MSK_PCIX_BUS:
  1257                 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
 1258                 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
 1259                 if (val == 0)
 1260                         pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
 1261                 if (sc->msk_bustype == MSK_PCIX_BUS) {
 1262                         /* Set Cache Line Size opt. */
 1263                         val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
 1264                         val |= PCI_CLS_OPT;
 1265                         pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
 1266                 }
 1267                 break;
 1268         }
 1269         /* Set PHY power state. */
 1270         msk_phy_power(sc, MSK_PHY_POWERUP);
 1271 
 1272         /* Reset GPHY/GMAC Control */
 1273         for (i = 0; i < sc->msk_num_port; i++) {
 1274                 /* GPHY Control reset. */
 1275                 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
 1276                 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
 1277                 /* GMAC Control reset. */
 1278                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
 1279                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
 1280                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
 1281                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
 1282                         CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
 1283                             GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 1284                             GMC_BYP_RETR_ON);
 1285         }
 1286         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1287 
 1288         /* LED On. */
 1289         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
 1290 
 1291         /* Clear TWSI IRQ. */
 1292         CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
 1293 
 1294         /* Turn off hardware timer. */
 1295         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
 1296         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
 1297 
 1298         /* Turn off descriptor polling. */
 1299         CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
 1300 
 1301         /* Turn off time stamps. */
 1302         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
 1303         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 1304 
 1305         /* Configure timeout values. */
 1306         for (i = 0; i < sc->msk_num_port; i++) {
 1307                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
 1308                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
 1309                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
 1310                     MSK_RI_TO_53);
 1311                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
 1312                     MSK_RI_TO_53);
 1313                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
 1314                     MSK_RI_TO_53);
 1315                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
 1316                     MSK_RI_TO_53);
 1317                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
 1318                     MSK_RI_TO_53);
 1319                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
 1320                     MSK_RI_TO_53);
 1321                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
 1322                     MSK_RI_TO_53);
 1323                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
 1324                     MSK_RI_TO_53);
 1325                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
 1326                     MSK_RI_TO_53);
 1327                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
 1328                     MSK_RI_TO_53);
 1329                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
 1330                     MSK_RI_TO_53);
 1331                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
 1332                     MSK_RI_TO_53);
 1333         }
 1334 
 1335         /* Disable all interrupts. */
 1336         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 1337         CSR_READ_4(sc, B0_HWE_IMSK);
 1338         CSR_WRITE_4(sc, B0_IMSK, 0);
 1339         CSR_READ_4(sc, B0_IMSK);
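        /*
         * Reading the mask registers back flushes the posted writes, so
         * both interrupt masks are guaranteed to be in effect here.
         */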
 1340 
        /*
         * On dual port PCI-X cards, there is a problem where status
         * updates can be received out of order due to split transactions.
         */
 1345         if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
 1346                 int pcix;
 1347                 uint16_t pcix_cmd;
 1348 
 1349                 if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
 1350                         pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
 1351                         /* Clear Max Outstanding Split Transactions. */
 1352                         pcix_cmd &= ~0x70;
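                        /*
                         * Bits 6:4 of the PCI-X command register hold the
                         * maximum outstanding split transaction count; the
                         * encoding 0 set here allows a single outstanding
                         * split transaction, which keeps status updates
                         * ordered.
                         */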
 1353                         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1354                         pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
 1355                         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1356                 }
 1357         }
 1358         if (sc->msk_bustype == MSK_PEX_BUS) {
 1359                 uint16_t v, width;
 1360 
 1361                 v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
 1362                 /* Change Max. Read Request Size to 4096 bytes. */
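                /*
                 * PCIe encodes the maximum read request size as a power of
                 * two, size = 128 << field, so the field value 5 used below
                 * selects 128 << 5 = 4096 bytes.
                 */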
 1363                 v &= ~PEX_DC_MAX_RRS_MSK;
 1364                 v |= PEX_DC_MAX_RD_RQ_SIZE(5);
 1365                 pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
 1366                 width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
 1367                 width = (width & PEX_LS_LINK_WI_MSK) >> 4;
 1368                 v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
 1369                 v = (v & PEX_LS_LINK_WI_MSK) >> 4;
 1370                 if (v != width)
 1371                         device_printf(sc->msk_dev,
 1372                             "negotiated width of link(x%d) != "
 1373                             "max. width of link(x%d)\n", width, v); 
 1374         }
 1375 
 1376         /* Clear status list. */
 1377         bzero(sc->msk_stat_ring,
 1378             sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
 1379         sc->msk_stat_cons = 0;
 1380         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 1381             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1382         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
 1383         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
 1384         /* Set the status list base address. */
 1385         addr = sc->msk_stat_ring_paddr;
 1386         CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
 1387         CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
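        /*
         * A minimal sketch of the address-split macros used above (assumed
         * definitions; the hardware takes the 64-bit ring address as two
         * 32-bit halves):
         *
         *      MSK_ADDR_LO(x)  ((x) & 0xffffffffUL)
         *      MSK_ADDR_HI(x)  ((uint64_t)(x) >> 32)
         */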
 1388         /* Set the status list last index. */
 1389         CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
 1390         if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
 1391             sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
                /* Workaround for dev. #4.3 */
 1393                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
                /* Workaround for dev. #4.18 */
 1395                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
 1396                 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
 1397         } else {
 1398                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
 1399                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
 1400                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1401                     sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
 1402                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
 1403                 else
 1404                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
 1405                 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
 1406         }
 1407         /*
 1408          * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
 1409          */
 1410         CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
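        /*
         * MSK_USECS() presumably scales microseconds by the core clock
         * rate (msk_clock is in MHz), i.e. roughly (sc)->msk_clock * (us),
         * since the status timers count core clock ticks.
         */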
 1411 
 1412         /* Enable status unit. */
 1413         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
 1414 
 1415         CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
 1416         CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
 1417         CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
 1418 }
 1419 
 1420 static int
 1421 msk_probe(device_t dev)
 1422 {
 1423         struct msk_softc *sc;
 1424         char desc[100];
 1425 
 1426         sc = device_get_softc(device_get_parent(dev));
 1427         /*
 1428          * Not much to do here. We always know there will be
 1429          * at least one GMAC present, and if there are two,
 1430          * mskc_attach() will create a second device instance
 1431          * for us.
 1432          */
 1433         snprintf(desc, sizeof(desc),
 1434             "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
 1435             model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
 1436             sc->msk_hw_rev);
 1437         device_set_desc_copy(dev, desc);
 1438 
 1439         return (BUS_PROBE_DEFAULT);
 1440 }
 1441 
 1442 static int
 1443 msk_attach(device_t dev)
 1444 {
 1445         struct msk_softc *sc;
 1446         struct msk_if_softc *sc_if;
 1447         struct ifnet *ifp;
 1448         int i, port, error;
 1449         uint8_t eaddr[6];
 1450 
 1451         if (dev == NULL)
 1452                 return (EINVAL);
 1453 
 1454         error = 0;
 1455         sc_if = device_get_softc(dev);
 1456         sc = device_get_softc(device_get_parent(dev));
 1457         port = *(int *)device_get_ivars(dev);
 1458 
 1459         sc_if->msk_if_dev = dev;
 1460         sc_if->msk_port = port;
 1461         sc_if->msk_softc = sc;
 1462         sc_if->msk_flags = sc->msk_pflags;
 1463         sc->msk_if[port] = sc_if;
 1464         /* Setup Tx/Rx queue register offsets. */
 1465         if (port == MSK_PORT_A) {
 1466                 sc_if->msk_txq = Q_XA1;
 1467                 sc_if->msk_txsq = Q_XS1;
 1468                 sc_if->msk_rxq = Q_R1;
 1469         } else {
 1470                 sc_if->msk_txq = Q_XA2;
 1471                 sc_if->msk_txsq = Q_XS2;
 1472                 sc_if->msk_rxq = Q_R2;
 1473         }
 1474 
 1475         callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
 1476         msk_sysctl_node(sc_if);
 1477 
        if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
 1479                 goto fail;
 1480         msk_rx_dma_jalloc(sc_if);
 1481 
 1482         ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
 1483         if (ifp == NULL) {
                device_printf(sc_if->msk_if_dev, "cannot if_alloc()\n");
 1485                 error = ENOSPC;
 1486                 goto fail;
 1487         }
 1488         ifp->if_softc = sc_if;
 1489         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1490         ifp->if_mtu = ETHERMTU;
 1491         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        /*
         * IFCAP_RXCSUM capability is intentionally disabled as the hardware
         * has a serious bug in Rx checksum offload for all Yukon II family
         * hardware. It seems there is a workaround to make it work sometimes,
         * but the workaround also has to check OP code sequences to verify
         * whether the OP code is correct. Sometimes it would even have to
         * compute the IP/TCP/UDP checksum in the driver to verify the
         * correctness of the checksum computed by the hardware. If you have
         * to compute the checksum in software to verify the hardware's
         * checksum, why have the hardware compute it at all? I see no reason
         * to spend time making Rx checksum offload work on Yukon II hardware.
         */
 1504         ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
        /*
         * Enable Rx checksum offloading if the controller supports the new
         * descriptor format.
         */
 1509         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 && 
 1510             (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
 1511                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1512         ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
 1513         ifp->if_capenable = ifp->if_capabilities;
 1514         ifp->if_ioctl = msk_ioctl;
 1515         ifp->if_start = msk_start;
 1516         ifp->if_timer = 0;
 1517         ifp->if_watchdog = NULL;
 1518         ifp->if_init = msk_init;
 1519         IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
 1520         ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
 1521         IFQ_SET_READY(&ifp->if_snd);
 1522 
 1523         TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);
 1524 
 1525         /*
 1526          * Get station address for this interface. Note that
 1527          * dual port cards actually come with three station
 1528          * addresses: one for each port, plus an extra. The
 1529          * extra one is used by the SysKonnect driver software
 1530          * as a 'virtual' station address for when both ports
 1531          * are operating in failover mode. Currently we don't
 1532          * use this extra address.
 1533          */
 1534         MSK_IF_LOCK(sc_if);
 1535         for (i = 0; i < ETHER_ADDR_LEN; i++)
 1536                 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
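        /*
         * As the loop above implies, the station addresses are laid out
         * at B2_MAC_1 with an 8-byte stride per port: 6 address bytes
         * followed, presumably, by 2 bytes of padding.
         */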
 1537 
 1538         /*
 1539          * Call MI attach routine.  Can't hold locks when calling into ether_*.
 1540          */
 1541         MSK_IF_UNLOCK(sc_if);
 1542         ether_ifattach(ifp, eaddr);
 1543         MSK_IF_LOCK(sc_if);
 1544 
 1545         /* VLAN capability setup */
 1546         ifp->if_capabilities |= IFCAP_VLAN_MTU;
 1547         if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
                /*
                 * Due to Tx checksum offload hardware bugs, msk(4) manually
                 * computes the checksum for short frames. For VLAN tagged
                 * frames this workaround does not work, so disable checksum
                 * offload for the VLAN interface.
                 */
 1554                 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
                /*
                 * Enable Rx checksum offloading for VLAN tagged frames
                 * if the controller supports the new descriptor format.
                 */
 1559                 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 && 
 1560                     (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
 1561                         ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
 1562         }
 1563         ifp->if_capenable = ifp->if_capabilities;
 1564 
 1565         /*
 1566          * Tell the upper layer(s) we support long frames.
 1567          * Must appear after the call to ether_ifattach() because
 1568          * ether_ifattach() sets ifi_hdrlen to the default value.
 1569          */
 1570         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 1571 
 1572         /*
 1573          * Do miibus setup.
 1574          */
 1575         MSK_IF_UNLOCK(sc_if);
 1576         error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
 1577             msk_mediastatus);
 1578         if (error != 0) {
 1579                 device_printf(sc_if->msk_if_dev, "no PHY found!\n");
 1580                 ether_ifdetach(ifp);
 1581                 error = ENXIO;
 1582                 goto fail;
 1583         }
 1584 
 1585 fail:
 1586         if (error != 0) {
 1587                 /* Access should be ok even though lock has been dropped */
 1588                 sc->msk_if[port] = NULL;
 1589                 msk_detach(dev);
 1590         }
 1591 
 1592         return (error);
 1593 }
 1594 
 1595 /*
 1596  * Attach the interface. Allocate softc structures, do ifmedia
 1597  * setup and ethernet/BPF attach.
 1598  */
 1599 static int
 1600 mskc_attach(device_t dev)
 1601 {
 1602         struct msk_softc *sc;
 1603         int error, msic, msir, *port, reg;
 1604 
 1605         sc = device_get_softc(dev);
 1606         sc->msk_dev = dev;
 1607         mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1608             MTX_DEF);
 1609 
 1610         /*
 1611          * Map control/status registers.
 1612          */
 1613         pci_enable_busmaster(dev);
 1614 
 1615         /* Allocate I/O resource */
 1616 #ifdef MSK_USEIOSPACE
 1617         sc->msk_res_spec = msk_res_spec_io;
 1618 #else
 1619         sc->msk_res_spec = msk_res_spec_mem;
 1620 #endif
 1621         sc->msk_irq_spec = msk_irq_spec_legacy;
 1622         error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
 1623         if (error) {
 1624                 if (sc->msk_res_spec == msk_res_spec_mem)
 1625                         sc->msk_res_spec = msk_res_spec_io;
 1626                 else
 1627                         sc->msk_res_spec = msk_res_spec_mem;
 1628                 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
 1629                 if (error) {
 1630                         device_printf(dev, "couldn't allocate %s resources\n",
 1631                             sc->msk_res_spec == msk_res_spec_mem ? "memory" :
 1632                             "I/O");
 1633                         mtx_destroy(&sc->msk_mtx);
 1634                         return (ENXIO);
 1635                 }
 1636         }
 1637 
 1638         CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1639         sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
 1640         sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
 1641         /* Bail out if chip is not recognized. */
 1642         if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
 1643             sc->msk_hw_id > CHIP_ID_YUKON_FE_P) {
 1644                 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
 1645                     sc->msk_hw_id, sc->msk_hw_rev);
 1646                 mtx_destroy(&sc->msk_mtx);
 1647                 return (ENXIO);
 1648         }
 1649 
 1650         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 1651             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1652             OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 1653             &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
 1654             "max number of Rx events to process");
 1655 
 1656         sc->msk_process_limit = MSK_PROC_DEFAULT;
 1657         error = resource_int_value(device_get_name(dev), device_get_unit(dev),
 1658             "process_limit", &sc->msk_process_limit);
 1659         if (error == 0) {
 1660                 if (sc->msk_process_limit < MSK_PROC_MIN ||
 1661                     sc->msk_process_limit > MSK_PROC_MAX) {
 1662                         device_printf(dev, "process_limit value out of range; "
 1663                             "using default: %d\n", MSK_PROC_DEFAULT);
 1664                         sc->msk_process_limit = MSK_PROC_DEFAULT;
 1665                 }
 1666         }
 1667 
 1668         /* Soft reset. */
 1669         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 1670         CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1671         sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
        if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
                sc->msk_coppertype = 0;
        else
                sc->msk_coppertype = 1;
 1676         /* Check number of MACs. */
 1677         sc->msk_num_port = 1;
 1678         if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
 1679             CFG_DUAL_MAC_MSK) {
 1680                 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
 1681                         sc->msk_num_port++;
 1682         }
 1683 
 1684         /* Check bus type. */
 1685         if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
 1686                 sc->msk_bustype = MSK_PEX_BUS;
 1687         else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
 1688                 sc->msk_bustype = MSK_PCIX_BUS;
 1689         else
 1690                 sc->msk_bustype = MSK_PCI_BUS;
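        /* pci_find_extcap() returns 0 only when the capability exists. */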
 1691 
 1692         switch (sc->msk_hw_id) {
 1693         case CHIP_ID_YUKON_EC:
                sc->msk_clock = 125;    /* 125 MHz */
 1695                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1696                 break;
 1697         case CHIP_ID_YUKON_EC_U:
                sc->msk_clock = 125;    /* 125 MHz */
 1699                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
 1700                 break;
 1701         case CHIP_ID_YUKON_EX:
                sc->msk_clock = 125;    /* 125 MHz */
 1703                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
 1704                     MSK_FLAG_AUTOTX_CSUM;
                /*
                 * The Yukon Extreme seems to have a silicon bug in its
                 * automatic Tx checksum calculation capability.
                 */
 1709                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
 1710                         sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
                /*
                 * Yukon Extreme A0 cannot use store-and-forward for jumbo
                 * frames, so disable Tx checksum offloading for jumbo
                 * frames.
                 */
 1716                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
 1717                         sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
 1718                 break;
 1719         case CHIP_ID_YUKON_FE:
                sc->msk_clock = 100;    /* 100 MHz */
 1721                 sc->msk_pflags |= MSK_FLAG_FASTETHER;
 1722                 break;
 1723         case CHIP_ID_YUKON_FE_P:
                sc->msk_clock = 50;     /* 50 MHz */
 1725                 sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
 1726                     MSK_FLAG_AUTOTX_CSUM;
 1727                 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
                        /*
                         * XXX
                         * FE+ A0 has a status LE writeback bug, so msk(4)
                         * does not rely on the status word of received
                         * frames in msk_rxeof(), which in turn disables
                         * all hardware assistance bits reported by the
                         * status word as well as validity checks of the
                         * received frame. Just pass received frames to
                         * the upper stack with minimal tests and let the
                         * upper stack handle them.
                         */
 1738                         sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
 1739                             MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
 1740                 }
 1741                 break;
 1742         case CHIP_ID_YUKON_XL:
                sc->msk_clock = 156;    /* 156 MHz */
 1744                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1745                 break;
 1746         default:
                sc->msk_clock = 156;    /* 156 MHz */
 1748                 break;
 1749         }
 1750 
 1751         /* Allocate IRQ resources. */
 1752         msic = pci_msi_count(dev);
 1753         if (bootverbose)
                device_printf(dev, "MSI count: %d\n", msic);
 1755         /*
 1756          * The Yukon II reports it can handle two messages, one for each
 1757          * possible port.  We go ahead and allocate two messages and only
         * set up a handler for both if we have a dual port card.
 1759          *
 1760          * XXX: I haven't untangled the interrupt handler to handle dual
 1761          * port cards with separate MSI messages, so for now I disable MSI
 1762          * on dual port cards.
 1763          */
 1764         if (legacy_intr != 0)
 1765                 msi_disable = 1;
 1766         if (msi_disable == 0) {
 1767                 switch (msic) {
 1768                 case 2:
 1769                 case 1: /* 88E8058 reports 1 MSI message */
 1770                         msir = msic;
 1771                         if (sc->msk_num_port == 1 &&
 1772                             pci_alloc_msi(dev, &msir) == 0) {
 1773                                 if (msic == msir) {
 1774                                         sc->msk_pflags |= MSK_FLAG_MSI;
 1775                                         sc->msk_irq_spec = msic == 2 ?
 1776                                             msk_irq_spec_msi2 :
 1777                                             msk_irq_spec_msi;
 1778                                 } else
 1779                                         pci_release_msi(dev);
 1780                         }
 1781                         break;
 1782                 default:
 1783                         device_printf(dev,
                            "Unexpected number of MSI messages: %d\n", msic);
 1785                         break;
 1786                 }
 1787         }
 1788 
 1789         error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
 1790         if (error) {
 1791                 device_printf(dev, "couldn't allocate IRQ resources\n");
 1792                 goto fail;
 1793         }
 1794 
 1795         if ((error = msk_status_dma_alloc(sc)) != 0)
 1796                 goto fail;
 1797 
 1798         /* Set base interrupt mask. */
 1799         sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
 1800         sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
 1801             Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
 1802 
 1803         /* Reset the adapter. */
 1804         mskc_reset(sc);
 1805 
 1806         if ((error = mskc_setup_rambuffer(sc)) != 0)
 1807                 goto fail;
 1808 
 1809         sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
 1810         if (sc->msk_devs[MSK_PORT_A] == NULL) {
 1811                 device_printf(dev, "failed to add child for PORT_A\n");
 1812                 error = ENXIO;
 1813                 goto fail;
 1814         }
 1815         port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
 1816         if (port == NULL) {
 1817                 device_printf(dev, "failed to allocate memory for "
 1818                     "ivars of PORT_A\n");
 1819                 error = ENXIO;
 1820                 goto fail;
 1821         }
 1822         *port = MSK_PORT_A;
 1823         device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
 1824 
 1825         if (sc->msk_num_port > 1) {
 1826                 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
 1827                 if (sc->msk_devs[MSK_PORT_B] == NULL) {
 1828                         device_printf(dev, "failed to add child for PORT_B\n");
 1829                         error = ENXIO;
 1830                         goto fail;
 1831                 }
 1832                 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
 1833                 if (port == NULL) {
 1834                         device_printf(dev, "failed to allocate memory for "
 1835                             "ivars of PORT_B\n");
 1836                         error = ENXIO;
 1837                         goto fail;
 1838                 }
 1839                 *port = MSK_PORT_B;
 1840                 device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
 1841         }
 1842 
 1843         error = bus_generic_attach(dev);
 1844         if (error) {
 1845                 device_printf(dev, "failed to attach port(s)\n");
 1846                 goto fail;
 1847         }
 1848 
 1849         /* Hook interrupt last to avoid having to lock softc. */
 1850         if (legacy_intr)
 1851                 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
 1852                     INTR_MPSAFE, NULL, msk_legacy_intr, sc,
 1853                     &sc->msk_intrhand[0]);
 1854         else {
 1855                 TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
 1856                 sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
 1857                     taskqueue_thread_enqueue, &sc->msk_tq);
 1858                 taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
 1859                     device_get_nameunit(sc->msk_dev));
 1860                 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
 1861                     INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand[0]);
 1862         }
 1863 
 1864         if (error != 0) {
 1865                 device_printf(dev, "couldn't set up interrupt handler\n");
 1866                 if (legacy_intr == 0)
 1867                         taskqueue_free(sc->msk_tq);
 1868                 sc->msk_tq = NULL;
 1869                 goto fail;
 1870         }
 1871 fail:
 1872         if (error != 0)
 1873                 mskc_detach(dev);
 1874 
 1875         return (error);
 1876 }
 1877 
 1878 /*
 * Shut down hardware and free up resources. This can be called any
 1880  * time after the mutex has been initialized. It is called in both
 1881  * the error case in attach and the normal detach case so it needs
 1882  * to be careful about only freeing resources that have actually been
 1883  * allocated.
 1884  */
 1885 static int
 1886 msk_detach(device_t dev)
 1887 {
 1888         struct msk_softc *sc;
 1889         struct msk_if_softc *sc_if;
 1890         struct ifnet *ifp;
 1891 
 1892         sc_if = device_get_softc(dev);
 1893         KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
 1894             ("msk mutex not initialized in msk_detach"));
 1895         MSK_IF_LOCK(sc_if);
 1896 
 1897         ifp = sc_if->msk_ifp;
 1898         if (device_is_attached(dev)) {
 1899                 /* XXX */
 1900                 sc_if->msk_flags |= MSK_FLAG_DETACH;
 1901                 msk_stop(sc_if);
 1902                 /* Can't hold locks while calling detach. */
 1903                 MSK_IF_UNLOCK(sc_if);
 1904                 callout_drain(&sc_if->msk_tick_ch);
 1905                 taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
 1906                 ether_ifdetach(ifp);
 1907                 MSK_IF_LOCK(sc_if);
 1908         }
 1909 
 1910         /*
 1911          * We're generally called from mskc_detach() which is using
 1912          * device_delete_child() to get to here. It's already trashed
 1913          * miibus for us, so don't do it here or we'll panic.
 1914          *
 1915          * if (sc_if->msk_miibus != NULL) {
 1916          *      device_delete_child(dev, sc_if->msk_miibus);
 1917          *      sc_if->msk_miibus = NULL;
 1918          * }
 1919          */
 1920 
 1921         msk_rx_dma_jfree(sc_if);
 1922         msk_txrx_dma_free(sc_if);
 1923         bus_generic_detach(dev);
 1924 
 1925         if (ifp)
 1926                 if_free(ifp);
 1927         sc = sc_if->msk_softc;
 1928         sc->msk_if[sc_if->msk_port] = NULL;
 1929         MSK_IF_UNLOCK(sc_if);
 1930 
 1931         return (0);
 1932 }
 1933 
 1934 static int
 1935 mskc_detach(device_t dev)
 1936 {
 1937         struct msk_softc *sc;
 1938 
 1939         sc = device_get_softc(dev);
 1940         KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
 1941 
 1942         if (device_is_alive(dev)) {
 1943                 if (sc->msk_devs[MSK_PORT_A] != NULL) {
 1944                         free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
 1945                             M_DEVBUF);
 1946                         device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
 1947                 }
 1948                 if (sc->msk_devs[MSK_PORT_B] != NULL) {
 1949                         free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
 1950                             M_DEVBUF);
 1951                         device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
 1952                 }
 1953                 bus_generic_detach(dev);
 1954         }
 1955 
 1956         /* Disable all interrupts. */
 1957         CSR_WRITE_4(sc, B0_IMSK, 0);
 1958         CSR_READ_4(sc, B0_IMSK);
 1959         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 1960         CSR_READ_4(sc, B0_HWE_IMSK);
 1961 
 1962         /* LED Off. */
 1963         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
 1964 
 1965         /* Put hardware reset. */
 1966         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 1967 
 1968         msk_status_dma_free(sc);
 1969 
 1970         if (legacy_intr == 0 && sc->msk_tq != NULL) {
 1971                 taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
 1972                 taskqueue_free(sc->msk_tq);
 1973                 sc->msk_tq = NULL;
 1974         }
 1975         if (sc->msk_intrhand[0]) {
 1976                 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]);
 1977                 sc->msk_intrhand[0] = NULL;
 1978         }
 1979         if (sc->msk_intrhand[1]) {
                bus_teardown_intr(dev, sc->msk_irq[1], sc->msk_intrhand[1]);
 1981                 sc->msk_intrhand[1] = NULL;
 1982         }
 1983         bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
 1984         if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
 1985                 pci_release_msi(dev);
 1986         bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
 1987         mtx_destroy(&sc->msk_mtx);
 1988 
 1989         return (0);
 1990 }
 1991 
 1992 struct msk_dmamap_arg {
 1993         bus_addr_t      msk_busaddr;
 1994 };
 1995 
 1996 static void
 1997 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 1998 {
 1999         struct msk_dmamap_arg *ctx;
 2000 
 2001         if (error != 0)
 2002                 return;
 2003         ctx = arg;
 2004         ctx->msk_busaddr = segs[0].ds_addr;
 2005 }
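/*
 * msk_dmamap_cb() is the callback handed to bus_dmamap_load(): on success
 * it receives the DMA segment array and records the bus address of the
 * first segment. All ring tags here are created with a single segment, so
 * segs[0] always describes the whole mapping.
 */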
 2006 
 2007 /* Create status DMA region. */
 2008 static int
 2009 msk_status_dma_alloc(struct msk_softc *sc)
 2010 {
 2011         struct msk_dmamap_arg ctx;
 2012         int error;
 2013 
 2014         error = bus_dma_tag_create(
 2015                     bus_get_dma_tag(sc->msk_dev),       /* parent */
 2016                     MSK_STAT_ALIGN, 0,          /* alignment, boundary */
 2017                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2018                     BUS_SPACE_MAXADDR,          /* highaddr */
 2019                     NULL, NULL,                 /* filter, filterarg */
 2020                     MSK_STAT_RING_SZ,           /* maxsize */
 2021                     1,                          /* nsegments */
 2022                     MSK_STAT_RING_SZ,           /* maxsegsize */
 2023                     0,                          /* flags */
 2024                     NULL, NULL,                 /* lockfunc, lockarg */
 2025                     &sc->msk_stat_tag);
 2026         if (error != 0) {
 2027                 device_printf(sc->msk_dev,
 2028                     "failed to create status DMA tag\n");
 2029                 return (error);
 2030         }
 2031 
 2032         /* Allocate DMA'able memory and load the DMA map for status ring. */
 2033         error = bus_dmamem_alloc(sc->msk_stat_tag,
 2034             (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
 2035             BUS_DMA_ZERO, &sc->msk_stat_map);
 2036         if (error != 0) {
 2037                 device_printf(sc->msk_dev,
 2038                     "failed to allocate DMA'able memory for status ring\n");
 2039                 return (error);
 2040         }
 2041 
 2042         ctx.msk_busaddr = 0;
 2043         error = bus_dmamap_load(sc->msk_stat_tag,
 2044             sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
 2045             msk_dmamap_cb, &ctx, 0);
 2046         if (error != 0) {
 2047                 device_printf(sc->msk_dev,
 2048                     "failed to load DMA'able memory for status ring\n");
 2049                 return (error);
 2050         }
 2051         sc->msk_stat_ring_paddr = ctx.msk_busaddr;
 2052 
 2053         return (0);
 2054 }
 2055 
 2056 static void
 2057 msk_status_dma_free(struct msk_softc *sc)
 2058 {
 2059 
 2060         /* Destroy status block. */
 2061         if (sc->msk_stat_tag) {
 2062                 if (sc->msk_stat_map) {
 2063                         bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
 2064                         if (sc->msk_stat_ring) {
 2065                                 bus_dmamem_free(sc->msk_stat_tag,
 2066                                     sc->msk_stat_ring, sc->msk_stat_map);
 2067                                 sc->msk_stat_ring = NULL;
 2068                         }
 2069                         sc->msk_stat_map = NULL;
 2070                 }
 2071                 bus_dma_tag_destroy(sc->msk_stat_tag);
 2072                 sc->msk_stat_tag = NULL;
 2073         }
 2074 }
 2075 
 2076 static int
 2077 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
 2078 {
 2079         struct msk_dmamap_arg ctx;
 2080         struct msk_txdesc *txd;
 2081         struct msk_rxdesc *rxd;
 2082         bus_size_t rxalign;
 2083         int error, i;
 2084 
 2085         /* Create parent DMA tag. */
        /*
         * XXX
         * It seems that the Yukon II supports full 64-bit DMA operations,
         * but it needs two descriptors (list elements) per 64-bit DMA
         * transfer. Since we don't know in advance which DMA address
         * mapping (32-bit or 64-bit) will be used for each mbuf, we limit
         * its DMA space to the 32-bit address range. Otherwise, we would
         * have to check which DMA address is used and chain another
         * descriptor for the 64-bit part of the operation, which would
         * also make the descriptor ring size variable. Limiting DMA
         * addresses to the 32-bit address space greatly simplifies
         * descriptor handling and possibly increases performance a bit
         * due to the more efficient handling of descriptors. Apart from
         * harassing the checksum offloading mechanisms, it seems like a
         * really bad idea to use a separate descriptor for a 64-bit DMA
         * operation just to save a little descriptor memory. Anyway, I've
         * never seen such an exotic scheme on other Ethernet hardware.
         */
 2103         error = bus_dma_tag_create(
 2104                     bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
 2105                     1, 0,                       /* alignment, boundary */
 2106                     BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 2107                     BUS_SPACE_MAXADDR,          /* highaddr */
 2108                     NULL, NULL,                 /* filter, filterarg */
 2109                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 2110                     0,                          /* nsegments */
 2111                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 2112                     0,                          /* flags */
 2113                     NULL, NULL,                 /* lockfunc, lockarg */
 2114                     &sc_if->msk_cdata.msk_parent_tag);
 2115         if (error != 0) {
 2116                 device_printf(sc_if->msk_if_dev,
 2117                     "failed to create parent DMA tag\n");
 2118                 goto fail;
 2119         }
 2120         /* Create tag for Tx ring. */
 2121         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2122                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2123                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2124                     BUS_SPACE_MAXADDR,          /* highaddr */
 2125                     NULL, NULL,                 /* filter, filterarg */
 2126                     MSK_TX_RING_SZ,             /* maxsize */
 2127                     1,                          /* nsegments */
 2128                     MSK_TX_RING_SZ,             /* maxsegsize */
 2129                     0,                          /* flags */
 2130                     NULL, NULL,                 /* lockfunc, lockarg */
 2131                     &sc_if->msk_cdata.msk_tx_ring_tag);
 2132         if (error != 0) {
 2133                 device_printf(sc_if->msk_if_dev,
 2134                     "failed to create Tx ring DMA tag\n");
 2135                 goto fail;
 2136         }
 2137 
 2138         /* Create tag for Rx ring. */
 2139         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2140                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2141                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2142                     BUS_SPACE_MAXADDR,          /* highaddr */
 2143                     NULL, NULL,                 /* filter, filterarg */
 2144                     MSK_RX_RING_SZ,             /* maxsize */
 2145                     1,                          /* nsegments */
 2146                     MSK_RX_RING_SZ,             /* maxsegsize */
 2147                     0,                          /* flags */
 2148                     NULL, NULL,                 /* lockfunc, lockarg */
 2149                     &sc_if->msk_cdata.msk_rx_ring_tag);
 2150         if (error != 0) {
 2151                 device_printf(sc_if->msk_if_dev,
 2152                     "failed to create Rx ring DMA tag\n");
 2153                 goto fail;
 2154         }
 2155 
 2156         /* Create tag for Tx buffers. */
 2157         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2158                     1, 0,                       /* alignment, boundary */
 2159                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2160                     BUS_SPACE_MAXADDR,          /* highaddr */
 2161                     NULL, NULL,                 /* filter, filterarg */
 2162                     MSK_TSO_MAXSIZE,            /* maxsize */
 2163                     MSK_MAXTXSEGS,              /* nsegments */
 2164                     MSK_TSO_MAXSGSIZE,          /* maxsegsize */
 2165                     0,                          /* flags */
 2166                     NULL, NULL,                 /* lockfunc, lockarg */
 2167                     &sc_if->msk_cdata.msk_tx_tag);
 2168         if (error != 0) {
 2169                 device_printf(sc_if->msk_if_dev,
 2170                     "failed to create Tx DMA tag\n");
 2171                 goto fail;
 2172         }
 2173 
 2174         rxalign = 1;
        /*
         * Work around a hardware hang which seems to happen when the Rx
         * buffer is not aligned on a multiple of the FIFO word size
         * (8 bytes).
         */
 2179         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 2180                 rxalign = MSK_RX_BUF_ALIGN;
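        /* MSK_RX_BUF_ALIGN is assumed to equal the 8-byte FIFO word size. */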
 2181         /* Create tag for Rx buffers. */
 2182         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2183                     rxalign, 0,                 /* alignment, boundary */
 2184                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2185                     BUS_SPACE_MAXADDR,          /* highaddr */
 2186                     NULL, NULL,                 /* filter, filterarg */
 2187                     MCLBYTES,                   /* maxsize */
 2188                     1,                          /* nsegments */
 2189                     MCLBYTES,                   /* maxsegsize */
 2190                     0,                          /* flags */
 2191                     NULL, NULL,                 /* lockfunc, lockarg */
 2192                     &sc_if->msk_cdata.msk_rx_tag);
 2193         if (error != 0) {
 2194                 device_printf(sc_if->msk_if_dev,
 2195                     "failed to create Rx DMA tag\n");
 2196                 goto fail;
 2197         }
 2198 
 2199         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
 2200         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
 2201             (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
 2202             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
 2203         if (error != 0) {
 2204                 device_printf(sc_if->msk_if_dev,
 2205                     "failed to allocate DMA'able memory for Tx ring\n");
 2206                 goto fail;
 2207         }
 2208 
 2209         ctx.msk_busaddr = 0;
 2210         error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
 2211             sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
 2212             MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
 2213         if (error != 0) {
 2214                 device_printf(sc_if->msk_if_dev,
 2215                     "failed to load DMA'able memory for Tx ring\n");
 2216                 goto fail;
 2217         }
 2218         sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
 2219 
 2220         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
 2221         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
 2222             (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
 2223             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
 2224         if (error != 0) {
 2225                 device_printf(sc_if->msk_if_dev,
 2226                     "failed to allocate DMA'able memory for Rx ring\n");
 2227                 goto fail;
 2228         }
 2229 
 2230         ctx.msk_busaddr = 0;
 2231         error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
 2232             sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
 2233             MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
 2234         if (error != 0) {
 2235                 device_printf(sc_if->msk_if_dev,
 2236                     "failed to load DMA'able memory for Rx ring\n");
 2237                 goto fail;
 2238         }
 2239         sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
 2240 
 2241         /* Create DMA maps for Tx buffers. */
 2242         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2243                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 2244                 txd->tx_m = NULL;
 2245                 txd->tx_dmamap = NULL;
 2246                 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
 2247                     &txd->tx_dmamap);
 2248                 if (error != 0) {
 2249                         device_printf(sc_if->msk_if_dev,
 2250                             "failed to create Tx dmamap\n");
 2251                         goto fail;
 2252                 }
 2253         }
 2254         /* Create DMA maps for Rx buffers. */
 2255         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
 2256             &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
 2257                 device_printf(sc_if->msk_if_dev,
 2258                     "failed to create spare Rx dmamap\n");
 2259                 goto fail;
 2260         }
 2261         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2262                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2263                 rxd->rx_m = NULL;
 2264                 rxd->rx_dmamap = NULL;
 2265                 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
 2266                     &rxd->rx_dmamap);
 2267                 if (error != 0) {
 2268                         device_printf(sc_if->msk_if_dev,
 2269                             "failed to create Rx dmamap\n");
 2270                         goto fail;
 2271                 }
 2272         }
 2273 
 2274 fail:
 2275         return (error);
 2276 }
 2277 
 2278 static int
 2279 msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
 2280 {
 2281         struct msk_dmamap_arg ctx;
 2282         struct msk_rxdesc *jrxd;
 2283         bus_size_t rxalign;
 2284         int error, i;
 2285 
 2286         if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
 2287                 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
 2288                 device_printf(sc_if->msk_if_dev,
 2289                     "disabling jumbo frame support\n");
 2290                 return (0);
 2291         }
 2292         /* Create tag for jumbo Rx ring. */
 2293         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2294                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2295                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2296                     BUS_SPACE_MAXADDR,          /* highaddr */
 2297                     NULL, NULL,                 /* filter, filterarg */
 2298                     MSK_JUMBO_RX_RING_SZ,       /* maxsize */
 2299                     1,                          /* nsegments */
 2300                     MSK_JUMBO_RX_RING_SZ,       /* maxsegsize */
 2301                     0,                          /* flags */
 2302                     NULL, NULL,                 /* lockfunc, lockarg */
 2303                     &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2304         if (error != 0) {
 2305                 device_printf(sc_if->msk_if_dev,
 2306                     "failed to create jumbo Rx ring DMA tag\n");
 2307                 goto jumbo_fail;
 2308         }
 2309 
 2310         rxalign = 1;
        /*
         * Work around a hardware hang which seems to happen when the Rx
         * buffer is not aligned on a multiple of the FIFO word size
         * (8 bytes).
         */
 2315         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 2316                 rxalign = MSK_RX_BUF_ALIGN;
 2317         /* Create tag for jumbo Rx buffers. */
 2318         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2319                     rxalign, 0,                 /* alignment, boundary */
 2320                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2321                     BUS_SPACE_MAXADDR,          /* highaddr */
 2322                     NULL, NULL,                 /* filter, filterarg */
 2323                     MJUM9BYTES,                 /* maxsize */
 2324                     1,                          /* nsegments */
 2325                     MJUM9BYTES,                 /* maxsegsize */
 2326                     0,                          /* flags */
 2327                     NULL, NULL,                 /* lockfunc, lockarg */
 2328                     &sc_if->msk_cdata.msk_jumbo_rx_tag);
 2329         if (error != 0) {
 2330                 device_printf(sc_if->msk_if_dev,
 2331                     "failed to create jumbo Rx DMA tag\n");
 2332                 goto jumbo_fail;
 2333         }
 2334 
 2335         /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
 2336         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2337             (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
 2338             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
 2339             &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2340         if (error != 0) {
 2341                 device_printf(sc_if->msk_if_dev,
 2342                     "failed to allocate DMA'able memory for jumbo Rx ring\n");
 2343                 goto jumbo_fail;
 2344         }
 2345 
 2346         ctx.msk_busaddr = 0;
 2347         error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2348             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 2349             sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
 2350             msk_dmamap_cb, &ctx, 0);
 2351         if (error != 0) {
 2352                 device_printf(sc_if->msk_if_dev,
 2353                     "failed to load DMA'able memory for jumbo Rx ring\n");
 2354                 goto jumbo_fail;
 2355         }
 2356         sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
 2357 
 2358         /* Create DMA maps for jumbo Rx buffers. */
 2359         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2360             &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
 2361                 device_printf(sc_if->msk_if_dev,
 2362                     "failed to create spare jumbo Rx dmamap\n");
 2363                 goto jumbo_fail;
 2364         }
 2365         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2366                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2367                 jrxd->rx_m = NULL;
 2368                 jrxd->rx_dmamap = NULL;
 2369                 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2370                     &jrxd->rx_dmamap);
 2371                 if (error != 0) {
 2372                         device_printf(sc_if->msk_if_dev,
 2373                             "failed to create jumbo Rx dmamap\n");
 2374                         goto jumbo_fail;
 2375                 }
 2376         }
 2377 
 2378         return (0);
 2379 
 2380 jumbo_fail:
 2381         msk_rx_dma_jfree(sc_if);
 2382         device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
 2383             "due to resource shortage\n");
 2384         sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
 2385         return (error);
 2386 }
 2387 
 2388 static void
 2389 msk_txrx_dma_free(struct msk_if_softc *sc_if)
 2390 {
 2391         struct msk_txdesc *txd;
 2392         struct msk_rxdesc *rxd;
 2393         int i;
 2394 
 2395         /* Tx ring. */
 2396         if (sc_if->msk_cdata.msk_tx_ring_tag) {
 2397                 if (sc_if->msk_cdata.msk_tx_ring_map)
 2398                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
 2399                             sc_if->msk_cdata.msk_tx_ring_map);
 2400                 if (sc_if->msk_cdata.msk_tx_ring_map &&
 2401                     sc_if->msk_rdata.msk_tx_ring)
 2402                         bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
 2403                             sc_if->msk_rdata.msk_tx_ring,
 2404                             sc_if->msk_cdata.msk_tx_ring_map);
 2405                 sc_if->msk_rdata.msk_tx_ring = NULL;
 2406                 sc_if->msk_cdata.msk_tx_ring_map = NULL;
 2407                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
 2408                 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
 2409         }
 2410         /* Rx ring. */
 2411         if (sc_if->msk_cdata.msk_rx_ring_tag) {
 2412                 if (sc_if->msk_cdata.msk_rx_ring_map)
 2413                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
 2414                             sc_if->msk_cdata.msk_rx_ring_map);
 2415                 if (sc_if->msk_cdata.msk_rx_ring_map &&
 2416                     sc_if->msk_rdata.msk_rx_ring)
 2417                         bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
 2418                             sc_if->msk_rdata.msk_rx_ring,
 2419                             sc_if->msk_cdata.msk_rx_ring_map);
 2420                 sc_if->msk_rdata.msk_rx_ring = NULL;
 2421                 sc_if->msk_cdata.msk_rx_ring_map = NULL;
 2422                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
 2423                 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
 2424         }
 2425         /* Tx buffers. */
 2426         if (sc_if->msk_cdata.msk_tx_tag) {
 2427                 for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2428                         txd = &sc_if->msk_cdata.msk_txdesc[i];
 2429                         if (txd->tx_dmamap) {
 2430                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
 2431                                     txd->tx_dmamap);
 2432                                 txd->tx_dmamap = NULL;
 2433                         }
 2434                 }
 2435                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
 2436                 sc_if->msk_cdata.msk_tx_tag = NULL;
 2437         }
 2438         /* Rx buffers. */
 2439         if (sc_if->msk_cdata.msk_rx_tag) {
 2440                 for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2441                         rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2442                         if (rxd->rx_dmamap) {
 2443                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2444                                     rxd->rx_dmamap);
 2445                                 rxd->rx_dmamap = NULL;
 2446                         }
 2447                 }
 2448                 if (sc_if->msk_cdata.msk_rx_sparemap) {
 2449                         bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2450                             sc_if->msk_cdata.msk_rx_sparemap);
                        sc_if->msk_cdata.msk_rx_sparemap = NULL;
 2452                 }
 2453                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
 2454                 sc_if->msk_cdata.msk_rx_tag = NULL;
 2455         }
 2456         if (sc_if->msk_cdata.msk_parent_tag) {
 2457                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
 2458                 sc_if->msk_cdata.msk_parent_tag = NULL;
 2459         }
 2460 }
 2461 
 2462 static void
 2463 msk_rx_dma_jfree(struct msk_if_softc *sc_if)
 2464 {
 2465         struct msk_rxdesc *jrxd;
 2466         int i;
 2467 
 2468         /* Jumbo Rx ring. */
 2469         if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
 2470                 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
 2471                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2472                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2473                 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
 2474                     sc_if->msk_rdata.msk_jumbo_rx_ring)
 2475                         bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2476                             sc_if->msk_rdata.msk_jumbo_rx_ring,
 2477                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2478                 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
 2479                 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
 2480                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2481                 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
 2482         }
 2483         /* Jumbo Rx buffers. */
 2484         if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
 2485                 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2486                         jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2487                         if (jrxd->rx_dmamap) {
 2488                                 bus_dmamap_destroy(
 2489                                     sc_if->msk_cdata.msk_jumbo_rx_tag,
 2490                                     jrxd->rx_dmamap);
 2491                                 jrxd->rx_dmamap = NULL;
 2492                         }
 2493                 }
 2494                 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
 2495                         bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
 2496                             sc_if->msk_cdata.msk_jumbo_rx_sparemap);
 2497                         sc_if->msk_cdata.msk_jumbo_rx_sparemap = NULL;
 2498                 }
 2499                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
 2500                 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
 2501         }
 2502 }
 2503 
 2504 static int
 2505 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
 2506 {
 2507         struct msk_txdesc *txd, *txd_last;
 2508         struct msk_tx_desc *tx_le;
 2509         struct mbuf *m;
 2510         bus_dmamap_t map;
 2511         bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
 2512         uint32_t control, prod, si;
 2513         uint16_t offset, tcp_offset, tso_mtu;
 2514         int error, i, nseg, tso;
 2515 
 2516         MSK_IF_LOCK_ASSERT(sc_if);
 2517 
 2518         tcp_offset = offset = 0;
 2519         m = *m_head;
 2520         if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
 2521             (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
 2522             ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 2523             (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
 2524                 /*
 2525                  * Since the mbuf carries no protocol-specific structure
 2526                  * information, we have to inspect the protocol headers
 2527                  * here to set up TSO and checksum offload. It is unclear
 2528                  * why Marvell made such a decision in the chip design, as
 2529                  * other GigE hardware normally takes care of all these
 2530                  * chores in hardware. However, TSO performance of the
 2531                  * Yukon II is good enough that it is worth implementing.
 2532                  */
 2533                 struct ether_header *eh;
 2534                 struct ip *ip;
 2535                 struct tcphdr *tcp;
 2536 
 2537                 if (M_WRITABLE(m) == 0) {
 2538                         /* Get a writable copy. */
 2539                         m = m_dup(*m_head, M_DONTWAIT);
 2540                         m_freem(*m_head);
 2541                         if (m == NULL) {
 2542                                 *m_head = NULL;
 2543                                 return (ENOBUFS);
 2544                         }
 2545                         *m_head = m;
 2546                 }
 2547 
 2548                 offset = sizeof(struct ether_header);
 2549                 m = m_pullup(m, offset);
 2550                 if (m == NULL) {
 2551                         *m_head = NULL;
 2552                         return (ENOBUFS);
 2553                 }
 2554                 eh = mtod(m, struct ether_header *);
 2555                 /* Check if hardware VLAN insertion is off. */
 2556                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 2557                         offset = sizeof(struct ether_vlan_header);
 2558                         m = m_pullup(m, offset);
 2559                         if (m == NULL) {
 2560                                 *m_head = NULL;
 2561                                 return (ENOBUFS);
 2562                         }
 2563                 }
 2564                 m = m_pullup(m, offset + sizeof(struct ip));
 2565                 if (m == NULL) {
 2566                         *m_head = NULL;
 2567                         return (ENOBUFS);
 2568                 }
 2569                 ip = (struct ip *)(mtod(m, char *) + offset);
 2570                 offset += (ip->ip_hl << 2);
 2571                 tcp_offset = offset;
 2572                 /*
 2573                  * The Yukon II appears to have a Tx checksum offload bug
 2574                  * for small TCP packets less than 60 bytes in size
 2575                  * (e.g. TCP window probe packets, pure ACK packets).
 2576                  * The common workaround of padding with zeros to reach
 2577                  * the minimum ethernet frame size does not work at all.
 2578                  * Instead of disabling checksum offload completely, we
 2579                  * resort to a software checksum routine when we encounter
 2580                  * short TCP frames.
 2581                  * Short UDP packets appear to be handled correctly by the
 2582                  * Yukon II. This bug is also assumed not to occur on
 2583                  * controllers that use the newer descriptor format or
 2584                  * automatic Tx checksum calculation.
 2585                  */
 2586                 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
 2587                     (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
 2588                     (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
 2589                         m = m_pullup(m, offset + sizeof(struct tcphdr));
 2590                         if (m == NULL) {
 2591                                 *m_head = NULL;
 2592                                 return (ENOBUFS);
 2593                         }
 2594                         *(uint16_t *)(m->m_data + offset +
 2595                             m->m_pkthdr.csum_data) = in_cksum_skip(m,
 2596                             m->m_pkthdr.len, offset);
 2597                         m->m_pkthdr.csum_flags &= ~CSUM_TCP;
 2598                 }
 2599                 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2600                         m = m_pullup(m, offset + sizeof(struct tcphdr));
 2601                         if (m == NULL) {
 2602                                 *m_head = NULL;
 2603                                 return (ENOBUFS);
 2604                         }
 2605                         tcp = (struct tcphdr *)(mtod(m, char *) + offset);
 2606                         offset += (tcp->th_off << 2);
 2607                 }
 2608                 *m_head = m;
 2609         }
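
        /*
         * A worked example of the offsets computed above (illustrative
         * numbers, not driver state): for an untagged TCP/IPv4 frame the
         * Ethernet header is 14 bytes and a minimal IP header (ip_hl == 5)
         * adds 20, so tcp_offset == 34.  With CSUM_TSO set, a minimal TCP
         * header (th_off == 5) advances `offset' to 54; on pre-DESCV2
         * controllers that header length is added to the TSO segment size
         * when programming the OP_LRGLEN descriptor below.
         */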
 2610 
 2611         prod = sc_if->msk_cdata.msk_tx_prod;
 2612         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2613         txd_last = txd;
 2614         map = txd->tx_dmamap;
 2615         error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
 2616             *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
 2617         if (error == EFBIG) {
 2618                 m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
 2619                 if (m == NULL) {
 2620                         m_freem(*m_head);
 2621                         *m_head = NULL;
 2622                         return (ENOBUFS);
 2623                 }
 2624                 *m_head = m;
 2625                 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
 2626                     map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
 2627                 if (error != 0) {
 2628                         m_freem(*m_head);
 2629                         *m_head = NULL;
 2630                         return (error);
 2631                 }
 2632         } else if (error != 0)
 2633                 return (error);
 2634         if (nseg == 0) {
 2635                 m_freem(*m_head);
 2636                 *m_head = NULL;
 2637                 return (EIO);
 2638         }
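
        /*
         * The EFBIG handling above is the standard defragment-and-retry
         * idiom for bus_dmamap_load_mbuf_sg(9): when the mbuf chain needs
         * more than MSK_MAXTXSEGS DMA segments, m_collapse() rewrites it
         * into fewer, larger buffers and the load is attempted once more;
         * any failure on the second attempt drops the packet.  A zero
         * segment count from a successful load should not happen, so it is
         * treated as an error.
         */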
 2639 
 2640         /* Check number of available descriptors. */
 2641         if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
 2642             (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
 2643                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
 2644                 return (ENOBUFS);
 2645         }
 2646 
 2647         control = 0;
 2648         tso = 0;
 2649         tx_le = NULL;
 2650 
 2651         /* Check TSO support. */
 2652         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2653                 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
 2654                         tso_mtu = m->m_pkthdr.tso_segsz;
 2655                 else
 2656                         tso_mtu = offset + m->m_pkthdr.tso_segsz;
 2657                 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
 2658                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2659                         tx_le->msk_addr = htole32(tso_mtu);
 2660                         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
 2661                                 tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
 2662                         else
 2663                                 tx_le->msk_control =
 2664                                     htole32(OP_LRGLEN | HW_OWNER);
 2665                         sc_if->msk_cdata.msk_tx_cnt++;
 2666                         MSK_INC(prod, MSK_TX_RING_CNT);
 2667                         sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
 2668                 }
 2669                 tso++;
 2670         }
 2671         /* Check if we have a VLAN tag to insert. */
 2672         if ((m->m_flags & M_VLANTAG) != 0) {
 2673                 if (tso == 0) {
 2674                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2675                         tx_le->msk_addr = htole32(0);
 2676                         tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
 2677                             htons(m->m_pkthdr.ether_vtag));
 2678                         sc_if->msk_cdata.msk_tx_cnt++;
 2679                         MSK_INC(prod, MSK_TX_RING_CNT);
 2680                 } else {
 2681                         tx_le->msk_control |= htole32(OP_VLAN |
 2682                             htons(m->m_pkthdr.ether_vtag));
 2683                 }
 2684                 control |= INS_VLAN;
 2685         }
 2686         /* Check if we have to handle checksum offload. */
 2687         if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
 2688                 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
 2689                         control |= CALSUM;
 2690                 else {
 2691                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2692                         tx_le->msk_addr = htole32(((tcp_offset +
 2693                             m->m_pkthdr.csum_data) & 0xffff) |
 2694                             ((uint32_t)tcp_offset << 16));
 2695                         tx_le->msk_control = htole32(1 << 16 |
 2696                             (OP_TCPLISW | HW_OWNER));
 2697                         control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
 2698                         if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 2699                                 control |= UDPTCP;
 2700                         sc_if->msk_cdata.msk_tx_cnt++;
 2701                         MSK_INC(prod, MSK_TX_RING_CNT);
 2702                 }
 2703         }
 2704 
 2705         si = prod;
 2706         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2707         tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
 2708         if (tso == 0)
 2709                 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2710                     OP_PACKET);
 2711         else
 2712                 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2713                     OP_LARGESEND);
 2714         sc_if->msk_cdata.msk_tx_cnt++;
 2715         MSK_INC(prod, MSK_TX_RING_CNT);
 2716 
 2717         for (i = 1; i < nseg; i++) {
 2718                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2719                 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
 2720                 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
 2721                     OP_BUFFER | HW_OWNER);
 2722                 sc_if->msk_cdata.msk_tx_cnt++;
 2723                 MSK_INC(prod, MSK_TX_RING_CNT);
 2724         }
 2725         /* Update producer index. */
 2726         sc_if->msk_cdata.msk_tx_prod = prod;
 2727 
 2728         /* Set EOP on the last descriptor. */
 2729         prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
 2730         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2731         tx_le->msk_control |= htole32(EOP);
 2732 
 2733         /* Turn the first descriptor ownership to hardware. */
 2734         tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
 2735         tx_le->msk_control |= htole32(HW_OWNER);
 2736 
 2737         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2738         map = txd_last->tx_dmamap;
 2739         txd_last->tx_dmamap = txd->tx_dmamap;
 2740         txd->tx_dmamap = map;
 2741         txd->tx_m = m;
 2742 
 2743         /* Sync descriptors. */
 2744         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
 2745         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
 2746             sc_if->msk_cdata.msk_tx_ring_map,
 2747             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2748 
 2749         return (0);
 2750 }
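
/*
 * For reference, a fully optioned frame produces the following LE sequence
 * in the Tx ring (a sketch; each option LE is emitted only when needed):
 *
 *	[OP_MSS or OP_LRGLEN]		TSO segment size, when it changed
 *	[OP_VLAN]			VLAN tag (standalone only without TSO)
 *	[OP_TCPLISW]			checksum start/write offsets
 *	[OP_PACKET or OP_LARGESEND]	first data segment
 *	[OP_BUFFER] ...			remaining segments, EOP on the last
 *
 * HW_OWNER on the first data LE is set last, after the whole chain is
 * built, so the chip never fetches a half-constructed frame.
 */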
 2751 
 2752 static void
 2753 msk_tx_task(void *arg, int pending)
 2754 {
 2755         struct ifnet *ifp;
 2756 
 2757         ifp = arg;
 2758         msk_start(ifp);
 2759 }
 2760 
 2761 static void
 2762 msk_start(struct ifnet *ifp)
 2763 {
 2764         struct msk_if_softc *sc_if;
 2765         struct mbuf *m_head;
 2766         int enq;
 2767 
 2768         sc_if = ifp->if_softc;
 2769 
 2770         MSK_IF_LOCK(sc_if);
 2771 
 2772         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2773             IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
 2774                 MSK_IF_UNLOCK(sc_if);
 2775                 return;
 2776         }
 2777 
 2778         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2779             sc_if->msk_cdata.msk_tx_cnt <
 2780             (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
 2781                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2782                 if (m_head == NULL)
 2783                         break;
 2784                 /*
 2785                  * Pack the data into the transmit ring. If we
 2786                  * don't have room, set the OACTIVE flag and wait
 2787                  * for the NIC to drain the ring.
 2788                  */
 2789                 if (msk_encap(sc_if, &m_head) != 0) {
 2790                         if (m_head == NULL)
 2791                                 break;
 2792                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2793                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2794                         break;
 2795                 }
 2796 
 2797                 enq++;
 2798                 /*
 2799                  * If there's a BPF listener, bounce a copy of this frame
 2800                  * to him.
 2801                  */
 2802                 ETHER_BPF_MTAP(ifp, m_head);
 2803         }
 2804 
 2805         if (enq > 0) {
 2806                 /* Transmit */
 2807                 CSR_WRITE_2(sc_if->msk_softc,
 2808                     Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
 2809                     sc_if->msk_cdata.msk_tx_prod);
 2810 
 2811                 /* Set a timeout in case the chip goes out to lunch. */
 2812                 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
 2813         }
 2814 
 2815         MSK_IF_UNLOCK(sc_if);
 2816 }
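
/*
 * The put-index write above is the only doorbell the chip needs; the
 * prefetch unit walks the Tx ring up to that index on its own.  The
 * watchdog timer armed with it counts down once per second from
 * msk_tick() and fires only if Tx completions stop arriving.
 */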
 2817 
 2818 static void
 2819 msk_watchdog(struct msk_if_softc *sc_if)
 2820 {
 2821         struct ifnet *ifp;
 2822         uint32_t ridx;
 2823         int idx;
 2824 
 2825         MSK_IF_LOCK_ASSERT(sc_if);
 2826 
 2827         if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
 2828                 return;
 2829         ifp = sc_if->msk_ifp;
 2830         if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
 2831                 if (bootverbose)
 2832                         if_printf(sc_if->msk_ifp, "watchdog timeout "
 2833                            "(missed link)\n");
 2834                 ifp->if_oerrors++;
 2835                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2836                 msk_init_locked(sc_if);
 2837                 return;
 2838         }
 2839 
 2840         /*
 2841          * Reclaim first as there is a possibility of losing Tx completion
 2842          * interrupts.
 2843          */
 2844         ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
 2845         idx = CSR_READ_2(sc_if->msk_softc, ridx);
 2846         if (sc_if->msk_cdata.msk_tx_cons != idx) {
 2847                 msk_txeof(sc_if, idx);
 2848                 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
 2849                         if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
 2850                             "-- recovering\n");
 2851                         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2852                                 taskqueue_enqueue(taskqueue_fast,
 2853                                     &sc_if->msk_tx_task);
 2854                         return;
 2855                 }
 2856         }
 2857 
 2858         if_printf(ifp, "watchdog timeout\n");
 2859         ifp->if_oerrors++;
 2860         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2861         msk_init_locked(sc_if);
 2862         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2863                 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task);
 2864 }
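
/*
 * The reclaim-first step in msk_watchdog() exists because a Tx completion
 * interrupt can be lost: if the hardware's consumer index shows progress,
 * completed buffers are freed, and when the ring has fully drained the
 * timeout is treated as a missed interrupt rather than a wedged chip, so
 * transmission restarts without a full reinitialization.
 */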
 2865 
 2866 static int
 2867 mskc_shutdown(device_t dev)
 2868 {
 2869         struct msk_softc *sc;
 2870         int i;
 2871 
 2872         sc = device_get_softc(dev);
 2873         MSK_LOCK(sc);
 2874         for (i = 0; i < sc->msk_num_port; i++) {
 2875                 if (sc->msk_if[i] != NULL)
 2876                         msk_stop(sc->msk_if[i]);
 2877         }
 2878 
 2879         /* Disable all interrupts. */
 2880         CSR_WRITE_4(sc, B0_IMSK, 0);
 2881         CSR_READ_4(sc, B0_IMSK);
 2882         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 2883         CSR_READ_4(sc, B0_HWE_IMSK);
 2884 
 2885         /* Put hardware reset. */
 2886         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 2887 
 2888         MSK_UNLOCK(sc);
 2889         return (0);
 2890 }
 2891 
 2892 static int
 2893 mskc_suspend(device_t dev)
 2894 {
 2895         struct msk_softc *sc;
 2896         int i;
 2897 
 2898         sc = device_get_softc(dev);
 2899 
 2900         MSK_LOCK(sc);
 2901 
 2902         for (i = 0; i < sc->msk_num_port; i++) {
 2903                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 2904                     ((sc->msk_if[i]->msk_ifp->if_drv_flags &
 2905                     IFF_DRV_RUNNING) != 0))
 2906                         msk_stop(sc->msk_if[i]);
 2907         }
 2908 
 2909         /* Disable all interrupts. */
 2910         CSR_WRITE_4(sc, B0_IMSK, 0);
 2911         CSR_READ_4(sc, B0_IMSK);
 2912         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 2913         CSR_READ_4(sc, B0_HWE_IMSK);
 2914 
 2915         msk_phy_power(sc, MSK_PHY_POWERDOWN);
 2916 
 2917         /* Put hardware reset. */
 2918         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 2919         sc->msk_pflags |= MSK_FLAG_SUSPEND;
 2920 
 2921         MSK_UNLOCK(sc);
 2922 
 2923         return (0);
 2924 }
 2925 
 2926 static int
 2927 mskc_resume(device_t dev)
 2928 {
 2929         struct msk_softc *sc;
 2930         int i;
 2931 
 2932         sc = device_get_softc(dev);
 2933 
 2934         MSK_LOCK(sc);
 2935 
 2936         mskc_reset(sc);
 2937         for (i = 0; i < sc->msk_num_port; i++) {
 2938                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 2939                     ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
 2940                         sc->msk_if[i]->msk_ifp->if_drv_flags &=
 2941                             ~IFF_DRV_RUNNING;
 2942                         msk_init_locked(sc->msk_if[i]);
 2943                 }
 2944         }
 2945         sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
 2946 
 2947         MSK_UNLOCK(sc);
 2948 
 2949         return (0);
 2950 }
 2951 
 2952 #ifndef __NO_STRICT_ALIGNMENT
 2953 static __inline void
 2954 msk_fixup_rx(struct mbuf *m)
 2955 {
 2956         int i;
 2957         uint16_t *src, *dst;
 2958 
 2959         src = mtod(m, uint16_t *);
 2960         dst = src - 3;
 2961 
 2962         for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
 2963                 *dst++ = *src++;
 2964 
 2965         m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
 2966 }
 2967 #endif
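
/*
 * A worked example of msk_fixup_rx(): on strict-alignment machines the
 * chip DMAs the frame to an MSK_RX_BUF_ALIGN boundary, but the 14-byte
 * Ethernet header means the IP header is only 4-byte aligned when the
 * frame instead starts ETHER_ALIGN (2) bytes into a word.  Copying the
 * data down by three uint16_t's (MSK_RX_BUF_ALIGN - ETHER_ALIGN, 6 bytes
 * with the 8-byte Rx alignment) and rewinding m_data by the same amount
 * restores that alignment without allocating and copying into a fresh
 * mbuf; the extra iteration in the loop covers odd-length frames.
 */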
 2968 
 2969 static void
 2970 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
 2971     int len)
 2972 {
 2973         struct mbuf *m;
 2974         struct ifnet *ifp;
 2975         struct msk_rxdesc *rxd;
 2976         int cons, rxlen;
 2977 
 2978         ifp = sc_if->msk_ifp;
 2979 
 2980         MSK_IF_LOCK_ASSERT(sc_if);
 2981 
 2982         cons = sc_if->msk_cdata.msk_rx_cons;
 2983         do {
 2984                 rxlen = status >> 16;
 2985                 if ((status & GMR_FS_VLAN) != 0 &&
 2986                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 2987                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 2988                 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
 2989                         /*
 2990                          * For controllers that return a bogus status code,
 2991                          * just do a minimal check and let the upper stack
 2992                          * handle this frame.
 2993                          */
 2994                         if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
 2995                                 ifp->if_ierrors++;
 2996                                 msk_discard_rxbuf(sc_if, cons);
 2997                                 break;
 2998                         }
 2999                 } else if (len > sc_if->msk_framesize ||
 3000                     ((status & GMR_FS_ANY_ERR) != 0) ||
 3001                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 3002                         /* Don't count flow-control packets as errors. */
 3003                         if ((status & GMR_FS_GOOD_FC) == 0)
 3004                                 ifp->if_ierrors++;
 3005                         msk_discard_rxbuf(sc_if, cons);
 3006                         break;
 3007                 }
 3008                 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
 3009                 m = rxd->rx_m;
 3010                 if (msk_newbuf(sc_if, cons) != 0) {
 3011                         ifp->if_iqdrops++;
 3012                         /* Reuse old buffer. */
 3013                         msk_discard_rxbuf(sc_if, cons);
 3014                         break;
 3015                 }
 3016                 m->m_pkthdr.rcvif = ifp;
 3017                 m->m_pkthdr.len = m->m_len = len;
 3018 #ifndef __NO_STRICT_ALIGNMENT
 3019                 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 3020                         msk_fixup_rx(m);
 3021 #endif
 3022                 ifp->if_ipackets++;
 3023                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
 3024                     (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
 3025                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 3026                         if ((control & CSS_IPV4_CSUM_OK) != 0)
 3027                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 3028                         if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
 3029                             (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
 3030                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
 3031                                     CSUM_PSEUDO_HDR;
 3032                                 m->m_pkthdr.csum_data = 0xffff;
 3033                         }
 3034                 }
 3035                 /* Check for VLAN tagged packets. */
 3036                 if ((status & GMR_FS_VLAN) != 0 &&
 3037                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 3038                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 3039                         m->m_flags |= M_VLANTAG;
 3040                 }
 3041                 MSK_IF_UNLOCK(sc_if);
 3042                 (*ifp->if_input)(ifp, m);
 3043                 MSK_IF_LOCK(sc_if);
 3044         } while (0);
 3045 
 3046         MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
 3047         MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
 3048 }
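
/*
 * The CSS_* to csum_flags translation above is the stock FreeBSD Rx
 * checksum-assist idiom: CSUM_IP_CHECKED/CSUM_IP_VALID report the IPv4
 * header check, while a good TCP/UDP checksum is reported as
 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data set to 0xffff, which
 * tells the stack the full pseudo-header checksum has already been
 * verified.
 */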
 3049 
 3050 static void
 3051 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
 3052     int len)
 3053 {
 3054         struct mbuf *m;
 3055         struct ifnet *ifp;
 3056         struct msk_rxdesc *jrxd;
 3057         int cons, rxlen;
 3058 
 3059         ifp = sc_if->msk_ifp;
 3060 
 3061         MSK_IF_LOCK_ASSERT(sc_if);
 3062 
 3063         cons = sc_if->msk_cdata.msk_rx_cons;
 3064         do {
 3065                 rxlen = status >> 16;
 3066                 if ((status & GMR_FS_VLAN) != 0 &&
 3067                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3068                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 3069                 if (len > sc_if->msk_framesize ||
 3070                     ((status & GMR_FS_ANY_ERR) != 0) ||
 3071                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 3072                         /* Don't count flow-control packets as errors. */
 3073                         if ((status & GMR_FS_GOOD_FC) == 0)
 3074                                 ifp->if_ierrors++;
 3075                         msk_discard_jumbo_rxbuf(sc_if, cons);
 3076                         break;
 3077                 }
 3078                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
 3079                 m = jrxd->rx_m;
 3080                 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
 3081                         ifp->if_iqdrops++;
 3082                         /* Reuse old buffer. */
 3083                         msk_discard_jumbo_rxbuf(sc_if, cons);
 3084                         break;
 3085                 }
 3086                 m->m_pkthdr.rcvif = ifp;
 3087                 m->m_pkthdr.len = m->m_len = len;
 3088 #ifndef __NO_STRICT_ALIGNMENT
 3089                 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 3090                         msk_fixup_rx(m);
 3091 #endif
 3092                 ifp->if_ipackets++;
 3093                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
 3094                     (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
 3095                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 3096                         if ((control & CSS_IPV4_CSUM_OK) != 0)
 3097                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 3098                         if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
 3099                             (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
 3100                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
 3101                                     CSUM_PSEUDO_HDR;
 3102                                 m->m_pkthdr.csum_data = 0xffff;
 3103                         }
 3104                 }
 3105                 /* Check for VLAN tagged packets. */
 3106                 if ((status & GMR_FS_VLAN) != 0 &&
 3107                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 3108                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 3109                         m->m_flags |= M_VLANTAG;
 3110                 }
 3111                 MSK_IF_UNLOCK(sc_if);
 3112                 (*ifp->if_input)(ifp, m);
 3113                 MSK_IF_LOCK(sc_if);
 3114         } while (0);
 3115 
 3116         MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
 3117         MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
 3118 }
 3119 
 3120 static void
 3121 msk_txeof(struct msk_if_softc *sc_if, int idx)
 3122 {
 3123         struct msk_txdesc *txd;
 3124         struct msk_tx_desc *cur_tx;
 3125         struct ifnet *ifp;
 3126         uint32_t control;
 3127         int cons, prog;
 3128 
 3129         MSK_IF_LOCK_ASSERT(sc_if);
 3130 
 3131         ifp = sc_if->msk_ifp;
 3132 
 3133         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
 3134             sc_if->msk_cdata.msk_tx_ring_map,
 3135             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3136         /*
 3137          * Go through our tx ring and free mbufs for those
 3138          * frames that have been sent.
 3139          */
 3140         cons = sc_if->msk_cdata.msk_tx_cons;
 3141         prog = 0;
 3142         for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
 3143                 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
 3144                         break;
 3145                 prog++;
 3146                 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
 3147                 control = le32toh(cur_tx->msk_control);
 3148                 sc_if->msk_cdata.msk_tx_cnt--;
 3149                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3150                 if ((control & EOP) == 0)
 3151                         continue;
 3152                 txd = &sc_if->msk_cdata.msk_txdesc[cons];
 3153                 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
 3154                     BUS_DMASYNC_POSTWRITE);
 3155                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
 3156 
 3157                 ifp->if_opackets++;
 3158                 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
 3159                     __func__));
 3160                 m_freem(txd->tx_m);
 3161                 txd->tx_m = NULL;
 3162         }
 3163 
 3164         if (prog > 0) {
 3165                 sc_if->msk_cdata.msk_tx_cons = cons;
 3166                 if (sc_if->msk_cdata.msk_tx_cnt == 0)
 3167                         sc_if->msk_watchdog_timer = 0;
 3168                 /* No need to sync LEs as we didn't update LEs. */
 3169         }
 3170 }
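
/*
 * Note that msk_txeof() charges every LE between the old and new consumer
 * index against msk_tx_cnt, but unloads the DMA map and frees the mbuf
 * only at the LE carrying EOP, because msk_encap() parks each frame's
 * mbuf and loaded map on its final descriptor.
 */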
 3171 
 3172 static void
 3173 msk_tick(void *xsc_if)
 3174 {
 3175         struct msk_if_softc *sc_if;
 3176         struct mii_data *mii;
 3177 
 3178         sc_if = xsc_if;
 3179 
 3180         MSK_IF_LOCK_ASSERT(sc_if);
 3181 
 3182         mii = device_get_softc(sc_if->msk_miibus);
 3183 
 3184         mii_tick(mii);
 3185         msk_watchdog(sc_if);
 3186         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 3187 }
 3188 
 3189 static void
 3190 msk_intr_phy(struct msk_if_softc *sc_if)
 3191 {
 3192         uint16_t status;
 3193 
 3194         msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 3195         status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 3196         /* Handle FIFO Underrun/Overflow? */
 3197         if ((status & PHY_M_IS_FIFO_ERROR) != 0)
 3198                 device_printf(sc_if->msk_if_dev,
 3199                     "PHY FIFO underrun/overflow.\n");
 3200 }
 3201 
 3202 static void
 3203 msk_intr_gmac(struct msk_if_softc *sc_if)
 3204 {
 3205         struct msk_softc *sc;
 3206         uint8_t status;
 3207 
 3208         sc = sc_if->msk_softc;
 3209         status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3210 
 3211         /* GMAC Rx FIFO overrun. */
 3212         if ((status & GM_IS_RX_FF_OR) != 0) {
 3213                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 3214                     GMF_CLI_RX_FO);
 3215                 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
 3216         }
 3217         /* GMAC Tx FIFO underrun. */
 3218         if ((status & GM_IS_TX_FF_UR) != 0) {
 3219                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3220                     GMF_CLI_TX_FU);
 3221                 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
 3222                 /*
 3223                  * XXX
 3224                  * In case of Tx underrun, we may need to flush/reset the
 3225                  * Tx MAC, but that would also require resynchronization
 3226                  * with the status LEs.  Reinitializing the status LEs
 3227                  * would affect the other port in a dual MAC configuration,
 3228                  * so it should be avoided as much as we can.
 3229                  * Due to lack of documentation this is all vague
 3230                  * guesswork, but it needs more investigation.
 3231                  */
 3232         }
 3233 }
 3234 
 3235 static void
 3236 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
 3237 {
 3238         struct msk_softc *sc;
 3239 
 3240         sc = sc_if->msk_softc;
 3241         if ((status & Y2_IS_PAR_RD1) != 0) {
 3242                 device_printf(sc_if->msk_if_dev,
 3243                     "RAM buffer read parity error\n");
 3244                 /* Clear IRQ. */
 3245                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3246                     RI_CLR_RD_PERR);
 3247         }
 3248         if ((status & Y2_IS_PAR_WR1) != 0) {
 3249                 device_printf(sc_if->msk_if_dev,
 3250                     "RAM buffer write parity error\n");
 3251                 /* Clear IRQ. */
 3252                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3253                     RI_CLR_WR_PERR);
 3254         }
 3255         if ((status & Y2_IS_PAR_MAC1) != 0) {
 3256                 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
 3257                 /* Clear IRQ. */
 3258                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3259                     GMF_CLI_TX_PE);
 3260         }
 3261         if ((status & Y2_IS_PAR_RX1) != 0) {
 3262                 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
 3263                 /* Clear IRQ. */
 3264                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
 3265         }
 3266         if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
 3267                 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
 3268                 /* Clear IRQ. */
 3269                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
 3270         }
 3271 }
 3272 
 3273 static void
 3274 msk_intr_hwerr(struct msk_softc *sc)
 3275 {
 3276         uint32_t status;
 3277         uint32_t tlphead[4];
 3278 
 3279         status = CSR_READ_4(sc, B0_HWE_ISRC);
 3280         /* Time Stamp timer overflow. */
 3281         if ((status & Y2_IS_TIST_OV) != 0)
 3282                 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 3283         if ((status & Y2_IS_PCI_NEXP) != 0) {
 3284                 /*
 3285                  * A PCI Express error occurred which is not described in
 3286                  * the PEX spec.
 3287                  * This error is also mapped to either the Master Abort
 3288                  * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit
 3289                  * and can only be cleared there.
 3290                  */
 3291                 device_printf(sc->msk_dev,
 3292                     "PCI Express protocol violation error\n");
 3293         }
 3294 
 3295         if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
 3296                 uint16_t v16;
 3297 
 3298                 if ((status & Y2_IS_MST_ERR) != 0)
 3299                         device_printf(sc->msk_dev,
 3300                             "unexpected IRQ Master error\n");
 3301                 else
 3302                         device_printf(sc->msk_dev,
 3303                             "unexpected IRQ Status error\n");
 3304                 /* Reset all bits in the PCI status register. */
 3305                 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 3306                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3307                 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
 3308                     PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 3309                     PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
 3310                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3311         }
 3312 
 3313         /* Check for PCI Express Uncorrectable Error. */
 3314         if ((status & Y2_IS_PCI_EXP) != 0) {
 3315                 uint32_t v32;
 3316 
 3317                 /*
 3318                  * On a PCI Express bus, bridges are called root complexes
 3319                  * (RC).  PCI Express errors are recognized by the root
 3320                  * complex too, which requests the system to handle the
 3321                  * problem.  After such an error it may be that no access
 3322                  * to the adapter can be performed any longer.
 3323                  */
 3324 
 3325                 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 3326                 if ((v32 & PEX_UNSUP_REQ) != 0) {
 3327                         /* Unsupported requests are only reported, not acted upon. */
 3328                         device_printf(sc->msk_dev,
 3329                             "Uncorrectable PCI Express error\n");
 3330                 }
 3331                 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
 3332                         int i;
 3333 
 3334                         /* Get TLP header from the Log Registers. */
 3335                         for (i = 0; i < 4; i++)
 3336                                 tlphead[i] = CSR_PCI_READ_4(sc,
 3337                                     PEX_HEADER_LOG + i * 4);
 3338                         /* Check for vendor defined broadcast message. */
 3339                         if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
 3340                                 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 3341                                 CSR_WRITE_4(sc, B0_HWE_IMSK,
 3342                                     sc->msk_intrhwemask);
 3343                                 CSR_READ_4(sc, B0_HWE_IMSK);
 3344                         }
 3345                 }
 3346                 /* Clear the interrupt. */
 3347                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3348                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 3349                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3350         }
 3351 
 3352         if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
 3353                 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
 3354         if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
 3355                 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
 3356 }
 3357 
 3358 static __inline void
 3359 msk_rxput(struct msk_if_softc *sc_if)
 3360 {
 3361         struct msk_softc *sc;
 3362 
 3363         sc = sc_if->msk_softc;
 3364         if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
 3365                 bus_dmamap_sync(
 3366                     sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 3367                     sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 3368                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3369         else
 3370                 bus_dmamap_sync(
 3371                     sc_if->msk_cdata.msk_rx_ring_tag,
 3372                     sc_if->msk_cdata.msk_rx_ring_map,
 3373                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3374         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
 3375             PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
 3376 }
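
/*
 * The ordering in msk_rxput() matters: the PREWRITE sync publishes the
 * newly initialized Rx LEs before the put-index write tells the prefetch
 * unit about them; writing the index first could let the chip fetch stale
 * descriptors.
 */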
 3377 
 3378 static int
 3379 msk_handle_events(struct msk_softc *sc)
 3380 {
 3381         struct msk_if_softc *sc_if;
 3382         int rxput[2];
 3383         struct msk_stat_desc *sd;
 3384         uint32_t control, status;
 3385         int cons, idx, len, port, rxprog;
 3386 
 3387         idx = CSR_READ_2(sc, STAT_PUT_IDX);
 3388         if (idx == sc->msk_stat_cons)
 3389                 return (0);
 3390 
 3391         /* Sync status LEs. */
 3392         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 3393             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3394         /* XXX Sync Rx LEs here. */
 3395 
 3396         rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
 3397 
 3398         rxprog = 0;
 3399         for (cons = sc->msk_stat_cons; cons != idx;) {
 3400                 sd = &sc->msk_stat_ring[cons];
 3401                 control = le32toh(sd->msk_control);
 3402                 if ((control & HW_OWNER) == 0)
 3403                         break;
 3404                 /*
 3405                  * Marvell's FreeBSD driver updates the status LE after
 3406                  * clearing HW_OWNER.  However, we have no way to sync a
 3407                  * single LE with the bus_dma(9) API; it only provides a
 3408                  * way to sync an entire DMA map.  So don't sync the LE
 3409                  * until we have a better way to sync LEs.
 3410                  */
 3411                 control &= ~HW_OWNER;
 3412                 sd->msk_control = htole32(control);
 3413                 status = le32toh(sd->msk_status);
 3414                 len = control & STLE_LEN_MASK;
 3415                 port = (control >> 16) & 0x01;
 3416                 sc_if = sc->msk_if[port];
 3417                 if (sc_if == NULL) {
 3418                         device_printf(sc->msk_dev, "invalid port opcode "
 3419                             "0x%08x\n", control & STLE_OP_MASK);
 3420                         continue;
 3421                 }
 3422 
 3423                 switch (control & STLE_OP_MASK) {
 3424                 case OP_RXVLAN:
 3425                         sc_if->msk_vtag = ntohs(len);
 3426                         break;
 3427                 case OP_RXCHKSVLAN:
 3428                         sc_if->msk_vtag = ntohs(len);
 3429                         break;
 3430                 case OP_RXSTAT:
 3431                         if (sc_if->msk_framesize >
 3432                             (MCLBYTES - MSK_RX_BUF_ALIGN))
 3433                                 msk_jumbo_rxeof(sc_if, status, control, len);
 3434                         else
 3435                                 msk_rxeof(sc_if, status, control, len);
 3436                         rxprog++;
 3437                         /*
 3438                          * Because there is no way to sync a single Rx LE,
 3439                          * put the DMA sync operation off until the end of
 3440                          * event processing.
 3441                          */
 3442                         rxput[port]++;
 3443                         /* Update prefetch unit if we've passed the watermark. */
 3444                         if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
 3445                                 msk_rxput(sc_if);
 3446                                 rxput[port] = 0;
 3447                         }
 3448                         break;
 3449                 case OP_TXINDEXLE:
 3450                         if (sc->msk_if[MSK_PORT_A] != NULL)
 3451                                 msk_txeof(sc->msk_if[MSK_PORT_A],
 3452                                     status & STLE_TXA1_MSKL);
 3453                         if (sc->msk_if[MSK_PORT_B] != NULL)
 3454                                 msk_txeof(sc->msk_if[MSK_PORT_B],
 3455                                     ((status & STLE_TXA2_MSKL) >>
 3456                                     STLE_TXA2_SHIFTL) |
 3457                                     ((len & STLE_TXA2_MSKH) <<
 3458                                     STLE_TXA2_SHIFTH));
 3459                         break;
 3460                 default:
 3461                         device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
 3462                             control & STLE_OP_MASK);
 3463                         break;
 3464                 }
 3465                 MSK_INC(cons, MSK_STAT_RING_CNT);
 3466                 if (rxprog > sc->msk_process_limit)
 3467                         break;
 3468         }
 3469 
 3470         sc->msk_stat_cons = cons;
 3471         /* XXX We should sync status LEs here. See above notes. */
 3472 
 3473         if (rxput[MSK_PORT_A] > 0)
 3474                 msk_rxput(sc->msk_if[MSK_PORT_A]);
 3475         if (rxput[MSK_PORT_B] > 0)
 3476                 msk_rxput(sc->msk_if[MSK_PORT_B]);
 3477 
 3478         return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
 3479 }
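
/*
 * A note on the OP_TXINDEXLE case above: one status LE carries the Tx
 * consumer indices for both ports.  Port A's index sits in the low bits
 * of `status' (STLE_TXA1_MSKL); port B's is split between the high bits
 * of `status' (STLE_TXA2_MSKL, shifted down) and spare bits of the
 * length field (STLE_TXA2_MSKH, shifted up), which is why the two pieces
 * are OR'd back together before calling msk_txeof() for MSK_PORT_B.
 */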
 3480 
 3481 /* Legacy interrupt handler for shared interrupt. */
 3482 static void
 3483 msk_legacy_intr(void *xsc)
 3484 {
 3485         struct msk_softc *sc;
 3486         struct msk_if_softc *sc_if0, *sc_if1;
 3487         struct ifnet *ifp0, *ifp1;
 3488         uint32_t status;
 3489 
 3490         sc = xsc;
 3491         MSK_LOCK(sc);
 3492 
 3493         /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
 3494         status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
 3495         if (status == 0 || status == 0xffffffff ||
 3496             (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
 3497             (status & sc->msk_intrmask) == 0) {
 3498                 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
                 MSK_UNLOCK(sc);
 3499                 return;
 3500         }
 3501 
 3502         sc_if0 = sc->msk_if[MSK_PORT_A];
 3503         sc_if1 = sc->msk_if[MSK_PORT_B];
 3504         ifp0 = ifp1 = NULL;
 3505         if (sc_if0 != NULL)
 3506                 ifp0 = sc_if0->msk_ifp;
 3507         if (sc_if1 != NULL)
 3508                 ifp1 = sc_if1->msk_ifp;
 3509 
 3510         if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
 3511                 msk_intr_phy(sc_if0);
 3512         if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
 3513                 msk_intr_phy(sc_if1);
 3514         if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
 3515                 msk_intr_gmac(sc_if0);
 3516         if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
 3517                 msk_intr_gmac(sc_if1);
 3518         if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
 3519                 device_printf(sc->msk_dev, "Rx descriptor error\n");
 3520                 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
 3521                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3522                 CSR_READ_4(sc, B0_IMSK);
 3523         }
 3524         if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
 3525                 device_printf(sc->msk_dev, "Tx descriptor error\n");
 3526                 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
 3527                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3528                 CSR_READ_4(sc, B0_IMSK);
 3529         }
 3530         if ((status & Y2_IS_HW_ERR) != 0)
 3531                 msk_intr_hwerr(sc);
 3532 
 3533         while (msk_handle_events(sc) != 0)
 3534                 ;
 3535         if ((status & Y2_IS_STAT_BMU) != 0)
 3536                 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
 3537 
 3538         /* Reenable interrupts. */
 3539         CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3540 
 3541         if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3542             !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
 3543                 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
 3544         if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3545             !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
 3546                 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
 3547 
 3548         MSK_UNLOCK(sc);
 3549 }
 3550 
 3551 static int
 3552 msk_intr(void *xsc)
 3553 {
 3554         struct msk_softc *sc;
 3555         uint32_t status;
 3556 
 3557         sc = xsc;
 3558         status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
 3559         /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
 3560         if (status == 0 || status == 0xffffffff) {
 3561                 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3562                 return (FILTER_STRAY);
 3563         }
 3564 
 3565         taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
 3566         return (FILTER_HANDLED);
 3567 }
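
/*
 * msk_intr() runs as an interrupt filter: it only decides whether the
 * interrupt is ours (FILTER_STRAY otherwise) and defers the real work to
 * msk_int_task() via the driver taskqueue, keeping the time spent in
 * primary interrupt context minimal.  Reading B0_Y2_SP_ISRC2 masks
 * further interrupts until the task re-enables them by writing 2 to
 * B0_Y2_SP_ICR.
 */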
 3568 
 3569 static void
 3570 msk_int_task(void *arg, int pending)
 3571 {
 3572         struct msk_softc *sc;
 3573         struct msk_if_softc *sc_if0, *sc_if1;
 3574         struct ifnet *ifp0, *ifp1;
 3575         uint32_t status;
 3576         int domore;
 3577 
 3578         sc = arg;
 3579         MSK_LOCK(sc);
 3580 
 3581         /* Get interrupt source. */
 3582         status = CSR_READ_4(sc, B0_ISRC);
 3583         if (status == 0 || status == 0xffffffff ||
 3584             (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
 3585             (status & sc->msk_intrmask) == 0)
 3586                 goto done;
 3587 
 3588         sc_if0 = sc->msk_if[MSK_PORT_A];
 3589         sc_if1 = sc->msk_if[MSK_PORT_B];
 3590         ifp0 = ifp1 = NULL;
 3591         if (sc_if0 != NULL)
 3592                 ifp0 = sc_if0->msk_ifp;
 3593         if (sc_if1 != NULL)
 3594                 ifp1 = sc_if1->msk_ifp;
 3595 
 3596         if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
 3597                 msk_intr_phy(sc_if0);
 3598         if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
 3599                 msk_intr_phy(sc_if1);
 3600         if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
 3601                 msk_intr_gmac(sc_if0);
 3602         if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
 3603                 msk_intr_gmac(sc_if1);
 3604         if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
 3605                 device_printf(sc->msk_dev, "Rx descriptor error\n");
 3606                 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
 3607                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3608                 CSR_READ_4(sc, B0_IMSK);
 3609         }
 3610         if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
 3611                 device_printf(sc->msk_dev, "Tx descriptor error\n");
 3612                 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
 3613                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3614                 CSR_READ_4(sc, B0_IMSK);
 3615         }
 3616         if ((status & Y2_IS_HW_ERR) != 0)
 3617                 msk_intr_hwerr(sc);
 3618 
 3619         domore = msk_handle_events(sc);
 3620         if ((status & Y2_IS_STAT_BMU) != 0)
 3621                 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
 3622 
 3623         if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3624             !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
 3625                 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task);
 3626         if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3627             !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
 3628                 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task);
 3629 
 3630         if (domore > 0) {
 3631                 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task);
 3632                 MSK_UNLOCK(sc);
 3633                 return;
 3634         }
 3635 done:
 3636         MSK_UNLOCK(sc);
 3637 
 3638         /* Reenable interrupts. */
 3639         CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3640 }
 3641 
 3642 static void
 3643 msk_set_tx_stfwd(struct msk_if_softc *sc_if)
 3644 {
 3645         struct msk_softc *sc;
 3646         struct ifnet *ifp;
 3647 
 3648         ifp = sc_if->msk_ifp;
 3649         sc = sc_if->msk_softc;
 3650         switch (sc->msk_hw_id) {
 3651         case CHIP_ID_YUKON_EX:
 3652                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
 3653                         goto yukon_ex_workaround;
 3654                 if (ifp->if_mtu > ETHERMTU)
 3655                         CSR_WRITE_4(sc,
 3656                             MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3657                             TX_JUMBO_ENA | TX_STFW_ENA);
 3658                 else
 3659                         CSR_WRITE_4(sc,
 3660                             MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3661                             TX_JUMBO_DIS | TX_STFW_ENA);
 3662                 break;
 3663         default:
 3664 yukon_ex_workaround:
 3665                 if (ifp->if_mtu > ETHERMTU) {
 3666                         /* Set Tx GMAC FIFO Almost Empty Threshold. */
 3667                         CSR_WRITE_4(sc,
 3668                             MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
 3669                             MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
 3670                         /* Disable Store & Forward mode for Tx. */
 3671                         CSR_WRITE_4(sc,
 3672                             MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3673                             TX_JUMBO_ENA | TX_STFW_DIS);
 3674                 } else {
 3675                         /* Enable Store & Forward mode for Tx. */
 3676                         CSR_WRITE_4(sc,
 3677                             MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3678                             TX_JUMBO_DIS | TX_STFW_ENA);
 3679                 }
 3680                 break;
 3681         }
 3682 }
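
/*
 * The trade-off in msk_set_tx_stfwd(): with jumbo frames, a Tx FIFO that
 * cannot hold an entire frame must start transmitting before the frame is
 * fully buffered (TX_STFW_DIS), with the almost-empty threshold raised so
 * the FIFO does not underrun mid-frame.  Yukon Extreme, except revision
 * A0, can keep store-and-forward enabled even for jumbo frames.
 */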
 3683 
 3684 static void
 3685 msk_init(void *xsc)
 3686 {
 3687         struct msk_if_softc *sc_if = xsc;
 3688 
 3689         MSK_IF_LOCK(sc_if);
 3690         msk_init_locked(sc_if);
 3691         MSK_IF_UNLOCK(sc_if);
 3692 }
 3693 
 3694 static void
 3695 msk_init_locked(struct msk_if_softc *sc_if)
 3696 {
 3697         struct msk_softc *sc;
 3698         struct ifnet *ifp;
 3699         struct mii_data  *mii;
 3700         uint16_t eaddr[ETHER_ADDR_LEN / 2];
 3701         uint16_t gmac;
 3702         uint32_t reg;
 3703         int error, i;
 3704 
 3705         MSK_IF_LOCK_ASSERT(sc_if);
 3706 
 3707         ifp = sc_if->msk_ifp;
 3708         sc = sc_if->msk_softc;
 3709         mii = device_get_softc(sc_if->msk_miibus);
 3710 
 3711         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 3712                 return;
 3713 
 3714         error = 0;
 3715         /* Cancel pending I/O and free all Rx/Tx buffers. */
 3716         msk_stop(sc_if);
 3717 
 3718         if (ifp->if_mtu < ETHERMTU)
 3719                 sc_if->msk_framesize = ETHERMTU;
 3720         else
 3721                 sc_if->msk_framesize = ifp->if_mtu;
 3722         sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 3723         if (ifp->if_mtu > ETHERMTU &&
 3724             (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
 3725                 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
 3726                 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
 3727         }
 3728 
 3729         /* GMAC Control reset. */
 3730         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
 3731         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
 3732         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
 3733         if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
 3734                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
 3735                     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 3736                     GMC_BYP_RETR_ON);
 3737  
 3738         /*
 3739          * Initialize GMAC first such that speed/duplex/flow-control
 3740          * parameters are renegotiated when the interface is brought up.
 3741          */
 3742         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
 3743 
 3744         /* Dummy read the Interrupt Source Register. */
 3745         CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3746 
 3747         /* Clear MIB stats. */
 3748         msk_stats_clear(sc_if);
 3749 
 3750         /* Disable FCS. */
 3751         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
 3752 
 3753         /* Setup Transmit Control Register. */
 3754         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
 3755 
 3756         /* Setup Transmit Flow Control Register. */
 3757         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
 3758 
 3759         /* Setup Transmit Parameter Register. */
 3760         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
 3761             TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
 3762             TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
 3763 
 3764         gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
 3765             GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
 3766 
 3767         if (ifp->if_mtu > ETHERMTU)
 3768                 gmac |= GM_SMOD_JUMBO_ENA;
 3769         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
 3770 
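              /*
               * The GMAC station-address registers are 16 bits wide and
               * spaced 4 bytes apart, so the six-byte address is written
               * as three words with an "i * 4" register stride, into
               * both the GM_SRC_ADDR_1 and GM_SRC_ADDR_2 banks.
               */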
 3771         /* Set station address. */
 3772         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
 3773         for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
 3774                 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
 3775                     eaddr[i]);
 3776         for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
 3777                 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
 3778                     eaddr[i]);
 3779 
 3780         /* Disable interrupts for counter overflows. */
 3781         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
 3782         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
 3783         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
 3784 
 3785         /* Configure Rx MAC FIFO. */
 3786         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 3787         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
 3788         reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
 3789         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
 3790             sc->msk_hw_id == CHIP_ID_YUKON_EX)
 3791                 reg |= GMF_RX_OVER_ON;
 3792         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
 3793 
 3794         /* Set receive filter. */
 3795         msk_rxfilter(sc_if);
 3796 
 3797         /* Flush Rx MAC FIFO on any flow control or error. */
 3798         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
 3799             GMR_FS_ANY_ERR);
 3800 
 3801         /*
 3802          * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
 3803          * due to hardware hang on receipt of pause frames.
 3804          */
 3805         reg = RX_GMF_FL_THR_DEF + 1;
 3806         /* Another magic value for Yukon FE+, taken from Linux. */
 3807         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3808             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
 3809                 reg = 0x178;
 3810         CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
 3811 
 3812         /* Configure Tx MAC FIFO. */
 3813         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 3814         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
 3815         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
 3816 
 3817         /* Configure hardware VLAN tag insertion/stripping. */
 3818         msk_setvlan(sc_if, ifp);
 3819 
 3820         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
 3821                 /* Set Rx Pause threshold. */
 3822                 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
 3823                     MSK_ECU_LLPP);
 3824                 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
 3825                     MSK_ECU_ULPP);
 3826                 /* Configure store-and-forward for Tx. */
 3827                 msk_set_tx_stfwd(sc_if);
 3828         }
 3829 
 3830         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3831             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
 3832                 /* Disable dynamic watermark - from Linux. */
 3833                 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
 3834                 reg &= ~0x03;
 3835                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
 3836         }
 3837 
 3838         /*
 3839          * Disable Force Sync bit and Alloc bit in Tx RAM interface
 3840          * arbiter as we don't use Sync Tx queue.
 3841          */
 3842         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
 3843             TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
 3844         /* Enable the RAM Interface Arbiter. */
 3845         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
 3846 
 3847         /* Setup RAM buffer. */
 3848         msk_set_rambuffer(sc_if);
 3849 
 3850         /* Disable Tx sync Queue. */
 3851         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
 3852 
 3853         /* Setup Tx Queue Bus Memory Interface. */
 3854         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
 3855         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
 3856         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
 3857         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
 3858         switch (sc->msk_hw_id) {
 3859         case CHIP_ID_YUKON_EC_U:
 3860                 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
 3861                         /* Fix for Yukon-EC Ultra: set BMU FIFO level */
 3862                         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
 3863                             MSK_ECU_TXFF_LEV);
 3864                 }
 3865                 break;
 3866         case CHIP_ID_YUKON_EX:
 3867                 /*
 3868                  * Yukon Extreme seems to have a silicon bug in its
 3869                  * automatic Tx checksum calculation capability.
 3870                  */
 3871                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
 3872                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
 3873                             F_TX_CHK_AUTO_OFF);
 3874                 break;
 3875         }
 3876 
 3877         /* Setup Rx Queue Bus Memory Interface. */
 3878         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
 3879         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
 3880         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
 3881         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
 3882         if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
 3883             sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
 3884                 /* MAC Rx RAM Read is controlled by hardware. */
 3885                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
 3886         }
 3887 
 3888         msk_set_prefetch(sc, sc_if->msk_txq,
 3889             sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
 3890         msk_init_tx_ring(sc_if);
 3891 
 3892         /* Disable Rx checksum offload and RSS hash. */
 3893         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
 3894             BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
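              /*
               * Frames that no longer fit in a standard mbuf cluster
               * (less the Rx alignment slack) go to the jumbo Rx ring;
               * otherwise the standard ring is used.  Either way, the Rx
               * prefetch unit is pointed at the matching ring address and
               * ring size before the ring itself is populated.
               */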
 3895         if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
 3896                 msk_set_prefetch(sc, sc_if->msk_rxq,
 3897                     sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
 3898                     MSK_JUMBO_RX_RING_CNT - 1);
 3899                 error = msk_init_jumbo_rx_ring(sc_if);
 3900         } else {
 3901                 msk_set_prefetch(sc, sc_if->msk_rxq,
 3902                     sc_if->msk_rdata.msk_rx_ring_paddr,
 3903                     MSK_RX_RING_CNT - 1);
 3904                 error = msk_init_rx_ring(sc_if);
 3905         }
 3906         if (error != 0) {
 3907                 device_printf(sc_if->msk_if_dev,
 3908                     "initialization failed: no memory for Rx buffers\n");
 3909                 msk_stop(sc_if);
 3910                 return;
 3911         }
 3912 
 3913         /* Configure interrupt handling. */
 3914         if (sc_if->msk_port == MSK_PORT_A) {
 3915                 sc->msk_intrmask |= Y2_IS_PORT_A;
 3916                 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
 3917         } else {
 3918                 sc->msk_intrmask |= Y2_IS_PORT_B;
 3919                 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
 3920         }
 3921         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 3922         CSR_READ_4(sc, B0_HWE_IMSK);
 3923         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3924         CSR_READ_4(sc, B0_IMSK);
 3925 
 3926         sc_if->msk_flags &= ~MSK_FLAG_LINK;
 3927         mii_mediachg(mii);
 3928 
 3929         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 3930         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3931 
 3932         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 3933 }
 3934 
 3935 static void
 3936 msk_set_rambuffer(struct msk_if_softc *sc_if)
 3937 {
 3938         struct msk_softc *sc;
 3939         int ltpp, utpp;
 3940 
 3941         sc = sc_if->msk_softc;
 3942         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
 3943                 return;
 3944 
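              /*
               * The RB_START/RB_END/RB_WP/RB_RP registers are programmed
               * in 8-byte units, hence the division of the byte-based
               * msk_rxqstart/msk_rxqend (and their Tx equivalents) by 8.
               */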
 3945         /* Setup Rx Queue. */
 3946         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
 3947         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
 3948             sc->msk_rxqstart[sc_if->msk_port] / 8);
 3949         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
 3950             sc->msk_rxqend[sc_if->msk_port] / 8);
 3951         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
 3952             sc->msk_rxqstart[sc_if->msk_port] / 8);
 3953         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
 3954             sc->msk_rxqstart[sc_if->msk_port] / 8);
 3955 
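              /*
               * Compute the upper and lower pause thresholds.  These
               * appear to be the RAM buffer fill levels at which Rx flow
               * control is asserted and released; queues smaller than
               * MSK_MIN_RXQ_SIZE use the smaller MSK_RB_LLPP_S margin.
               */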
 3956         utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 3957             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
 3958         ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 3959             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
 3960         if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
 3961                 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
 3962         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
 3963         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
 3964         /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
 3965 
 3966         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
 3967         CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
 3968 
 3969         /* Setup Tx Queue. */
 3970         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
 3971         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
 3972             sc->msk_txqstart[sc_if->msk_port] / 8);
 3973         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
 3974             sc->msk_txqend[sc_if->msk_port] / 8);
 3975         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
 3976             sc->msk_txqstart[sc_if->msk_port] / 8);
 3977         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
 3978             sc->msk_txqstart[sc_if->msk_port] / 8);
 3979         /* Enable Store & Forward for Tx side. */
 3980         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
 3981         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
 3982         CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
 3983 }
 3984 
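      /*
       * Program one of the Yukon 2 list-element prefetch units: reset it,
       * give it the bus address of the descriptor ring and the index of
       * the last list element, then switch it on so the hardware can
       * fetch descriptors from host memory ahead of use.
       */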
 3985 static void
 3986 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
 3987     uint32_t count)
 3988 {
 3989 
 3990         /* Reset the prefetch unit. */
 3991         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 3992             PREF_UNIT_RST_SET);
 3993         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 3994             PREF_UNIT_RST_CLR);
 3995         /* Set LE base address. */
 3996         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
 3997             MSK_ADDR_LO(addr));
 3998         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
 3999             MSK_ADDR_HI(addr));
 4000         /* Set the list last index. */
 4001         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
 4002             count);
 4003         /* Turn on prefetch unit. */
 4004         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4005             PREF_UNIT_OP_ON);
 4006         /* Dummy read to flush the preceding writes. */
 4007         CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
 4008 }
 4009 
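      /*
       * msk_stop() is the inverse of msk_init_locked(): it quiesces the
       * port (interrupts, MAC, BMUs, prefetch units, RAM buffers and MAC
       * FIFOs), frees any mbufs still attached to the Rx/Tx rings, and
       * marks the interface down.  The interface lock must be held.
       */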
 4010 static void
 4011 msk_stop(struct msk_if_softc *sc_if)
 4012 {
 4013         struct msk_softc *sc;
 4014         struct msk_txdesc *txd;
 4015         struct msk_rxdesc *rxd;
 4016         struct msk_rxdesc *jrxd;
 4017         struct ifnet *ifp;
 4018         uint32_t val;
 4019         int i;
 4020 
 4021         MSK_IF_LOCK_ASSERT(sc_if);
 4022         sc = sc_if->msk_softc;
 4023         ifp = sc_if->msk_ifp;
 4024 
 4025         callout_stop(&sc_if->msk_tick_ch);
 4026         sc_if->msk_watchdog_timer = 0;
 4027 
 4028         /* Disable interrupts. */
 4029         if (sc_if->msk_port == MSK_PORT_A) {
 4030                 sc->msk_intrmask &= ~Y2_IS_PORT_A;
 4031                 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
 4032         } else {
 4033                 sc->msk_intrmask &= ~Y2_IS_PORT_B;
 4034                 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
 4035         }
 4036         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 4037         CSR_READ_4(sc, B0_HWE_IMSK);
 4038         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 4039         CSR_READ_4(sc, B0_IMSK);
 4040 
 4041         /* Disable Tx/Rx MAC. */
 4042         val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 4043         val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
 4044         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
 4045         /* Read back to ensure the write completed. */
 4046         GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 4047         /* Update stats and clear counters. */
 4048         msk_stats_update(sc_if);
 4049 
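              /*
               * The Tx BMU is asked to stop and then polled until it
               * reports either the stop or the idle bit; while neither
               * is set, the stop command is reissued.
               */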
 4050         /* Stop Tx BMU. */
 4051         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
 4052         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 4053         for (i = 0; i < MSK_TIMEOUT; i++) {
 4054                 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
 4055                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 4056                             BMU_STOP);
 4057                         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 4058                 } else
 4059                         break;
 4060                 DELAY(1);
 4061         }
 4062         if (i == MSK_TIMEOUT)
 4063                 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
 4064         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
 4065             RB_RST_SET | RB_DIS_OP_MD);
 4066 
 4067         /* Disable all GMAC interrupts. */
 4068         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
 4069         /* Disable PHY interrupt. */
 4070         msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
 4071 
 4072         /* Disable the RAM Interface Arbiter. */
 4073         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
 4074 
 4075         /* Reset the PCI FIFO of the async Tx queue. */
 4076         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 4077             BMU_RST_SET | BMU_FIFO_RST);
 4078 
 4079         /* Reset the Tx prefetch unit. */
 4080         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
 4081             PREF_UNIT_RST_SET);
 4082 
 4083         /* Reset the RAM Buffer async Tx queue. */
 4084         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
 4085 
 4086         /* Reset Tx MAC FIFO. */
 4087         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 4088         /* Set Pause Off. */
 4089         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
 4090 
 4091         /*
 4092          * The Rx Stop command does not work on Yukon-2 unless the BMU
 4093          * has reached the end of a packet, and since we cannot rule out
 4094          * incoming data, the BMU must be reset while no DMA transfer is
 4095          * in progress.  Because the Rx path may still be active, the Rx
 4096          * RAM buffer is stopped first so that incoming data cannot
 4097          * trigger a new DMA.  Once the RAM buffer is stopped, the BMU is
 4098          * polled until any DMA in progress has ended, and only then is
 4099          * it reset.
 4100          */
 4101 
 4102         /* Disable the RAM Buffer receive queue. */
 4103         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
 4104         for (i = 0; i < MSK_TIMEOUT; i++) {
 4105                 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
 4106                     CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
 4107                         break;
 4108                 DELAY(1);
 4109         }
 4110         if (i == MSK_TIMEOUT)
 4111                 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
 4112         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
 4113             BMU_RST_SET | BMU_FIFO_RST);
 4114         /* Reset the Rx prefetch unit. */
 4115         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
 4116             PREF_UNIT_RST_SET);
 4117         /* Reset the RAM Buffer receive queue. */
 4118         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
 4119         /* Reset Rx MAC FIFO. */
 4120         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 4121 
 4122         /* Free Rx and Tx mbufs still in the queues. */
 4123         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 4124                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 4125                 if (rxd->rx_m != NULL) {
 4126                         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
 4127                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 4128                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
 4129                             rxd->rx_dmamap);
 4130                         m_freem(rxd->rx_m);
 4131                         rxd->rx_m = NULL;
 4132                 }
 4133         }
 4134         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 4135                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 4136                 if (jrxd->rx_m != NULL) {
 4137                         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
 4138                             jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 4139                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
 4140                             jrxd->rx_dmamap);
 4141                         m_freem(jrxd->rx_m);
 4142                         jrxd->rx_m = NULL;
 4143                 }
 4144         }
 4145         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 4146                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 4147                 if (txd->tx_m != NULL) {
 4148                         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
 4149                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 4150                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
 4151                             txd->tx_dmamap);
 4152                         m_freem(txd->tx_m);
 4153                         txd->tx_m = NULL;
 4154                 }
 4155         }
 4156 
 4157         /*
 4158          * Mark the interface down.
 4159          */
 4160         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 4161         sc_if->msk_flags &= ~MSK_FLAG_LINK;
 4162 }
 4163 
 4164 /*
 4165  * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the
 4166  * lower 16 bits of a counter clears its high 16 bits, so the lower
 4167  * 16 bits must be read last.
 4168  */
 4169 #define MSK_READ_MIB32(x, y)                                    \
 4170         (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +       \
 4171         (uint32_t)GMAC_READ_2(sc, x, y)
 4172 #define MSK_READ_MIB64(x, y)                                    \
 4173         (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +        \
 4174         (uint64_t)MSK_READ_MIB32(x, y)
 4175 
 4176 static void
 4177 msk_stats_clear(struct msk_if_softc *sc_if)
 4178 {
 4179         struct msk_softc *sc;
 4180         uint32_t reg;
 4181         uint16_t gmac;
 4182         int i;
 4183 
 4184         MSK_IF_LOCK_ASSERT(sc_if);
 4185 
 4186         sc = sc_if->msk_softc;
 4187         /* Set MIB Clear Counter Mode. */
 4188         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 4189         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 4190         /* Read all MIB Counters with Clear Mode set. */
 4191         for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i++)
 4192                 reg = MSK_READ_MIB32(sc_if->msk_port, i);
 4193         /* Clear MIB Clear Counter Mode. */
 4194         gmac &= ~GM_PAR_MIB_CLR;
 4195         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 4196 }
 4197 
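      /*
       * Fold the clear-on-read hardware MIB counters into the software
       * totals kept in msk_stats.  GM_PAR_MIB_CLR stays set for the whole
       * pass, and the otherwise unused GM_*_SPARE* registers are read as
       * well, apparently so that every counter in the bank is cleared.
       */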
 4198 static void
 4199 msk_stats_update(struct msk_if_softc *sc_if)
 4200 {
 4201         struct msk_softc *sc;
 4202         struct ifnet *ifp;
 4203         struct msk_hw_stats *stats;
 4204         uint16_t gmac;
 4205         uint32_t reg;
 4206 
 4207         MSK_IF_LOCK_ASSERT(sc_if);
 4208 
 4209         ifp = sc_if->msk_ifp;
 4210         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 4211                 return;
 4212         sc = sc_if->msk_softc;
 4213         stats = &sc_if->msk_stats;
 4214         /* Set MIB Clear Counter Mode. */
 4215         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 4216         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 4217 
 4218         /* Rx stats. */
 4219         stats->rx_ucast_frames +=
 4220             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
 4221         stats->rx_bcast_frames +=
 4222             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
 4223         stats->rx_pause_frames +=
 4224             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
 4225         stats->rx_mcast_frames +=
 4226             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
 4227         stats->rx_crc_errs +=
 4228             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
 4229         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
 4230         stats->rx_good_octets +=
 4231             MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
 4232         stats->rx_bad_octets +=
 4233             MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
 4234         stats->rx_runts +=
 4235             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
 4236         stats->rx_runt_errs +=
 4237             MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
 4238         stats->rx_pkts_64 +=
 4239             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
 4240         stats->rx_pkts_65_127 +=
 4241             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
 4242         stats->rx_pkts_128_255 +=
 4243             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
 4244         stats->rx_pkts_256_511 +=
 4245             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
 4246         stats->rx_pkts_512_1023 +=
 4247             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
 4248         stats->rx_pkts_1024_1518 +=
 4249             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
 4250         stats->rx_pkts_1519_max +=
 4251             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
 4252         stats->rx_pkts_too_long +=
 4253             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
 4254         stats->rx_pkts_jabbers +=
 4255             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
 4256         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
 4257         stats->rx_fifo_oflows +=
 4258             MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
 4259         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
 4260 
 4261         /* Tx stats. */
 4262         stats->tx_ucast_frames +=
 4263             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
 4264         stats->tx_bcast_frames +=
 4265             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
 4266         stats->tx_pause_frames +=
 4267             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
 4268         stats->tx_mcast_frames +=
 4269             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
 4270         stats->tx_octets +=
 4271             MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
 4272         stats->tx_pkts_64 +=
 4273             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
 4274         stats->tx_pkts_65_127 +=
 4275             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
 4276         stats->tx_pkts_128_255 +=
 4277             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
 4278         stats->tx_pkts_256_511 +=
 4279             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
 4280         stats->tx_pkts_512_1023 +=
 4281             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
 4282         stats->tx_pkts_1024_1518 +=
 4283             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
 4284         stats->tx_pkts_1519_max +=
 4285             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
 4286         reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
 4287         stats->tx_colls +=
 4288             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
 4289         stats->tx_late_colls +=
 4290             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
 4291         stats->tx_excess_colls +=
 4292             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
 4293         stats->tx_multi_colls +=
 4294             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
 4295         stats->tx_single_colls +=
 4296             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
 4297         stats->tx_underflows +=
 4298             MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
 4299         /* Clear MIB Clear Counter Mode. */
 4300         gmac &= ~GM_PAR_MIB_CLR;
 4301         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 4302 }
 4303 
 4304 static int
 4305 msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
 4306 {
 4307         struct msk_softc *sc;
 4308         struct msk_if_softc *sc_if;
 4309         uint32_t result, *stat;
 4310         int off;
 4311 
 4312         sc_if = (struct msk_if_softc *)arg1;
 4313         sc = sc_if->msk_softc;
 4314         off = arg2;
 4315         stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
 4316 
 4317         MSK_IF_LOCK(sc_if);
 4318         result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
 4319         result += *stat;
 4320         MSK_IF_UNLOCK(sc_if);
 4321 
 4322         return (sysctl_handle_int(oidp, &result, 0, req));
 4323 }
 4324 
 4325 static int
 4326 msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
 4327 {
 4328         struct msk_softc *sc;
 4329         struct msk_if_softc *sc_if;
 4330         uint64_t result, *stat;
 4331         int off;
 4332 
 4333         sc_if = (struct msk_if_softc *)arg1;
 4334         sc = sc_if->msk_softc;
 4335         off = arg2;
 4336         stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
 4337 
 4338         MSK_IF_LOCK(sc_if);
 4339         result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
 4340         result += *stat;
 4341         MSK_IF_UNLOCK(sc_if);
 4342 
 4343         return (sysctl_handle_quad(oidp, &result, 0, req));
 4344 }
 4345 
 4346 #undef MSK_READ_MIB32
 4347 #undef MSK_READ_MIB64
 4348 
 4349 #define MSK_SYSCTL_STAT32(sc, c, o, p, n, d)                            \
 4350         SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,   \
 4351             sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,    \
 4352             "IU", d)
 4353 #define MSK_SYSCTL_STAT64(sc, c, o, p, n, d)                            \
 4354         SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_QUAD | CTLFLAG_RD,   \
 4355             sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,    \
 4356             "Q", d)
 4357 
 4358 static void
 4359 msk_sysctl_node(struct msk_if_softc *sc_if)
 4360 {
 4361         struct sysctl_ctx_list *ctx;
 4362         struct sysctl_oid_list *child, *schild;
 4363         struct sysctl_oid *tree;
 4364 
 4365         ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
 4366         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
 4367 
 4368         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
 4369             NULL, "MSK Statistics");
 4370         schild = child = SYSCTL_CHILDREN(tree);
 4371         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
 4372             NULL, "MSK RX Statistics");
 4373         child = SYSCTL_CHILDREN(tree);
 4374         MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
 4375             child, rx_ucast_frames, "Good unicast frames");
 4376         MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
 4377             child, rx_bcast_frames, "Good broadcast frames");
 4378         MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
 4379             child, rx_pause_frames, "Pause frames");
 4380         MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
 4381             child, rx_mcast_frames, "Multicast frames");
 4382         MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
 4383             child, rx_crc_errs, "CRC errors");
 4384         MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
 4385             child, rx_good_octets, "Good octets");
 4386         MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
 4387             child, rx_bad_octets, "Bad octets");
 4388         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
 4389             child, rx_pkts_64, "64 bytes frames");
 4390         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
 4391             child, rx_pkts_65_127, "65 to 127 bytes frames");
 4392         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
 4393             child, rx_pkts_128_255, "128 to 255 bytes frames");
 4394         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
 4395             child, rx_pkts_256_511, "256 to 511 bytes frames");
 4396         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
 4397             child, rx_pkts_512_1023, "512 to 1023 bytes frames");
 4398         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
 4399             child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
 4400         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
 4401             child, rx_pkts_1519_max, "1519 to max frames");
 4402         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
 4403             child, rx_pkts_too_long, "frames too long");
 4404         MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
 4405             child, rx_pkts_jabbers, "Jabber errors");
 4406         MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
 4407             child, rx_fifo_oflows, "FIFO overflows");
 4408 
 4409         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
 4410             NULL, "MSK TX Statistics");
 4411         child = SYSCTL_CHILDREN(tree);
 4412         MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
 4413             child, tx_ucast_frames, "Unicast frames");
 4414         MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
 4415             child, tx_bcast_frames, "Broadcast frames");
 4416         MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
 4417             child, tx_pause_frames, "Pause frames");
 4418         MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
 4419             child, tx_mcast_frames, "Multicast frames");
 4420         MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
 4421             child, tx_octets, "Octets");
 4422         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
 4423             child, tx_pkts_64, "64 bytes frames");
 4424         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
 4425             child, tx_pkts_65_127, "65 to 127 bytes frames");
 4426         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
 4427             child, tx_pkts_128_255, "128 to 255 bytes frames");
 4428         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
 4429             child, tx_pkts_256_511, "256 to 511 bytes frames");
 4430         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
 4431             child, tx_pkts_512_1023, "512 to 1023 bytes frames");
 4432         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
 4433             child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
 4434         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
 4435             child, tx_pkts_1519_max, "1519 to max frames");
 4436         MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
 4437             child, tx_colls, "Collisions");
 4438         MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
 4439             child, tx_late_colls, "Late collisions");
 4440         MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
 4441             child, tx_excess_colls, "Excessive collisions");
 4442         MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
 4443             child, tx_multi_colls, "Multiple collisions");
 4444         MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
 4445             child, tx_single_colls, "Single collisions");
 4446         MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
 4447             child, tx_underflows, "FIFO underflows");
 4448 }
 4449 
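      /*
       * The nodes built above hang off the device's sysctl tree, so each
       * counter is readable from userland, e.g. (assuming the first port
       * attaches as msk0):
       *
       *      sysctl dev.msk.0.stats.rx.ucast_frames
       *
       * Reads return the saved software total plus the live hardware
       * count, so values stay monotonic across msk_stats_update() calls.
       */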
 4450 #undef MSK_SYSCTL_STAT32
 4451 #undef MSK_SYSCTL_STAT64
 4452 
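      /*
       * Generic bounded-integer sysctl handler: a new value is accepted
       * only if it lies within [low, high]; out-of-range writes return
       * EINVAL and leave the variable untouched.  It is used by
       * sysctl_hw_msk_proc_limit() below to bound the processing-limit
       * tunable between MSK_PROC_MIN and MSK_PROC_MAX.
       */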
 4453 static int
 4454 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 4455 {
 4456         int error, value;
 4457 
 4458         if (!arg1)
 4459                 return (EINVAL);
 4460         value = *(int *)arg1;
 4461         error = sysctl_handle_int(oidp, &value, 0, req);
 4462         if (error || !req->newptr)
 4463                 return (error);
 4464         if (value < low || value > high)
 4465                 return (EINVAL);
 4466         *(int *)arg1 = value;
 4467 
 4468         return (0);
 4469 }
 4470 
 4471 static int
 4472 sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
 4473 {
 4474 
 4475         return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
 4476             MSK_PROC_MAX));
 4477 }
