FreeBSD/Linux Kernel Cross Reference
sys/dev/msk/if_msk.c


    1 /******************************************************************************
    2  *
    3  * Name   : sky2.c
    4  * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
    5  * Version: $Revision: 1.23 $
    6  * Date   : $Date: 2005/12/22 09:04:11 $
    7  * Purpose: Main driver source file
    8  *
    9  *****************************************************************************/
   10 
   11 /******************************************************************************
   12  *
   13  *      LICENSE:
   14  *      Copyright (C) Marvell International Ltd. and/or its affiliates
   15  *
   16  *      The computer program files contained in this folder ("Files")
   17  *      are provided to you under the BSD-type license terms provided
   18  *      below, and any use of such Files and any derivative works
   19  *      thereof created by you shall be governed by the following terms
   20  *      and conditions:
   21  *
   22  *      - Redistributions of source code must retain the above copyright
   23  *        notice, this list of conditions and the following disclaimer.
   24  *      - Redistributions in binary form must reproduce the above
   25  *        copyright notice, this list of conditions and the following
   26  *        disclaimer in the documentation and/or other materials provided
   27  *        with the distribution.
   28  *      - Neither the name of Marvell nor the names of its contributors
   29  *        may be used to endorse or promote products derived from this
   30  *        software without specific prior written permission.
   31  *
   32  *      THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   33  *      "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   34  *      LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   35  *      FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   36  *      COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   37  *      INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
   38  *      BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
   39  *      LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   40  *      HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
   41  *      STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   42  *      ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
   43  *      OF THE POSSIBILITY OF SUCH DAMAGE.
   44  *      /LICENSE
   45  *
   46  *****************************************************************************/
   47 
   48 /*-
   49  * Copyright (c) 1997, 1998, 1999, 2000
   50  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
   51  *
   52  * Redistribution and use in source and binary forms, with or without
   53  * modification, are permitted provided that the following conditions
   54  * are met:
   55  * 1. Redistributions of source code must retain the above copyright
   56  *    notice, this list of conditions and the following disclaimer.
   57  * 2. Redistributions in binary form must reproduce the above copyright
   58  *    notice, this list of conditions and the following disclaimer in the
   59  *    documentation and/or other materials provided with the distribution.
   60  * 3. All advertising materials mentioning features or use of this software
   61  *    must display the following acknowledgement:
   62  *      This product includes software developed by Bill Paul.
   63  * 4. Neither the name of the author nor the names of any co-contributors
   64  *    may be used to endorse or promote products derived from this software
   65  *    without specific prior written permission.
   66  *
   67  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   68  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   69  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   70  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   71  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   72  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   73  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   74  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   75  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   76  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   77  * THE POSSIBILITY OF SUCH DAMAGE.
   78  */
   79 /*-
   80  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
   81  *
   82  * Permission to use, copy, modify, and distribute this software for any
   83  * purpose with or without fee is hereby granted, provided that the above
   84  * copyright notice and this permission notice appear in all copies.
   85  *
   86  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   87  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   88  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   89  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   90  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   91  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   92  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
   93  */
   94 
   95 /*
   96  * Device driver for the Marvell Yukon II Ethernet controller.
   97  * Due to the lack of documentation, this driver is based on the code from
   98  * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
   99  */
  100 
  101 #include <sys/cdefs.h>
  102 __FBSDID("$FreeBSD$");
  103 
  104 #include <sys/param.h>
  105 #include <sys/systm.h>
  106 #include <sys/bus.h>
  107 #include <sys/endian.h>
  108 #include <sys/mbuf.h>
  109 #include <sys/malloc.h>
  110 #include <sys/kernel.h>
  111 #include <sys/module.h>
  112 #include <sys/socket.h>
  113 #include <sys/sockio.h>
  114 #include <sys/queue.h>
  115 #include <sys/sysctl.h>
  116 
  117 #include <net/bpf.h>
  118 #include <net/ethernet.h>
  119 #include <net/if.h>
  120 #include <net/if_arp.h>
  121 #include <net/if_dl.h>
  122 #include <net/if_media.h>
  123 #include <net/if_types.h>
  124 #include <net/if_vlan_var.h>
  125 
  126 #include <netinet/in.h>
  127 #include <netinet/in_systm.h>
  128 #include <netinet/ip.h>
  129 #include <netinet/tcp.h>
  130 #include <netinet/udp.h>
  131 
  132 #include <machine/bus.h>
  133 #include <machine/in_cksum.h>
  134 #include <machine/resource.h>
  135 #include <sys/rman.h>
  136 
  137 #include <dev/mii/mii.h>
  138 #include <dev/mii/miivar.h>
  139 
  140 #include <dev/pci/pcireg.h>
  141 #include <dev/pci/pcivar.h>
  142 
  143 #include <dev/msk/if_mskreg.h>
  144 
  145 MODULE_DEPEND(msk, pci, 1, 1, 1);
  146 MODULE_DEPEND(msk, ether, 1, 1, 1);
  147 MODULE_DEPEND(msk, miibus, 1, 1, 1);
  148 
  149 /* "device miibus" required.  See GENERIC if you get errors here. */
  150 #include "miibus_if.h"
  151 
  152 /* Tunables. */
  153 static int msi_disable = 0;
  154 TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
  155 static int legacy_intr = 0;
  156 TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
  157 static int jumbo_disable = 0;
  158 TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
  159 
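/*
 * These tunables are read from the kernel environment at boot time; as an
 * illustrative example, they could be set from /boot/loader.conf:
 *
 *	hw.msk.msi_disable="1"
 *	hw.msk.legacy_intr="1"
 *	hw.msk.jumbo_disable="1"
 */
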
  160 #define MSK_CSUM_FEATURES       (CSUM_TCP | CSUM_UDP)
  161 
  162 /*
  163  * Devices supported by this driver.
  164  */
  165 static const struct msk_product {
  166         uint16_t        msk_vendorid;
  167         uint16_t        msk_deviceid;
  168         const char      *msk_name;
  169 } msk_products[] = {
  170         { VENDORID_SK, DEVICEID_SK_YUKON2,
  171             "SK-9Sxx Gigabit Ethernet" },
  172         { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
  173             "SK-9Exx Gigabit Ethernet"},
  174         { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
  175             "Marvell Yukon 88E8021CU Gigabit Ethernet" },
  176         { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
  177             "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
  178         { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
  179             "Marvell Yukon 88E8022CU Gigabit Ethernet" },
  180         { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
  181             "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
  182         { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
  183             "Marvell Yukon 88E8061CU Gigabit Ethernet" },
  184         { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
  185             "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
  186         { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
  187             "Marvell Yukon 88E8062CU Gigabit Ethernet" },
  188         { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
  189             "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
  190         { VENDORID_MARVELL, DEVICEID_MRVL_8035,
  191             "Marvell Yukon 88E8035 Fast Ethernet" },
  192         { VENDORID_MARVELL, DEVICEID_MRVL_8036,
  193             "Marvell Yukon 88E8036 Fast Ethernet" },
  194         { VENDORID_MARVELL, DEVICEID_MRVL_8038,
  195             "Marvell Yukon 88E8038 Fast Ethernet" },
  196         { VENDORID_MARVELL, DEVICEID_MRVL_8039,
  197             "Marvell Yukon 88E8039 Fast Ethernet" },
  198         { VENDORID_MARVELL, DEVICEID_MRVL_8040,
  199             "Marvell Yukon 88E8040 Fast Ethernet" },
  200         { VENDORID_MARVELL, DEVICEID_MRVL_8040T,
  201             "Marvell Yukon 88E8040T Fast Ethernet" },
  202         { VENDORID_MARVELL, DEVICEID_MRVL_8042,
  203             "Marvell Yukon 88E8042 Fast Ethernet" },
  204         { VENDORID_MARVELL, DEVICEID_MRVL_8048,
  205             "Marvell Yukon 88E8048 Fast Ethernet" },
  206         { VENDORID_MARVELL, DEVICEID_MRVL_4361,
  207             "Marvell Yukon 88E8050 Gigabit Ethernet" },
  208         { VENDORID_MARVELL, DEVICEID_MRVL_4360,
  209             "Marvell Yukon 88E8052 Gigabit Ethernet" },
  210         { VENDORID_MARVELL, DEVICEID_MRVL_4362,
  211             "Marvell Yukon 88E8053 Gigabit Ethernet" },
  212         { VENDORID_MARVELL, DEVICEID_MRVL_4363,
  213             "Marvell Yukon 88E8055 Gigabit Ethernet" },
  214         { VENDORID_MARVELL, DEVICEID_MRVL_4364,
  215             "Marvell Yukon 88E8056 Gigabit Ethernet" },
  216         { VENDORID_MARVELL, DEVICEID_MRVL_4365,
  217             "Marvell Yukon 88E8070 Gigabit Ethernet" },
  218         { VENDORID_MARVELL, DEVICEID_MRVL_436A,
  219             "Marvell Yukon 88E8058 Gigabit Ethernet" },
  220         { VENDORID_MARVELL, DEVICEID_MRVL_436B,
  221             "Marvell Yukon 88E8071 Gigabit Ethernet" },
  222         { VENDORID_MARVELL, DEVICEID_MRVL_436C,
  223             "Marvell Yukon 88E8072 Gigabit Ethernet" },
  224         { VENDORID_MARVELL, DEVICEID_MRVL_436D,
  225             "Marvell Yukon 88E8055 Gigabit Ethernet" },
  226         { VENDORID_MARVELL, DEVICEID_MRVL_4370,
  227             "Marvell Yukon 88E8075 Gigabit Ethernet" },
  228         { VENDORID_MARVELL, DEVICEID_MRVL_4380,
  229             "Marvell Yukon 88E8057 Gigabit Ethernet" },
  230         { VENDORID_MARVELL, DEVICEID_MRVL_4381,
  231             "Marvell Yukon 88E8059 Gigabit Ethernet" },
  232         { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
  233             "D-Link 550SX Gigabit Ethernet" },
  234         { VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
  235             "D-Link 560SX Gigabit Ethernet" },
  236         { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
  237             "D-Link 560T Gigabit Ethernet" }
  238 };
  239 
  240 static const char *model_name[] = {
  241         "Yukon XL",
  242         "Yukon EC Ultra",
  243         "Yukon EX",
  244         "Yukon EC",
  245         "Yukon FE",
  246         "Yukon FE+",
  247         "Yukon Supreme",
  248         "Yukon Ultra 2",
  249         "Yukon Unknown",
  250         "Yukon Optima",
  251 };
  252 
  253 static int mskc_probe(device_t);
  254 static int mskc_attach(device_t);
  255 static int mskc_detach(device_t);
  256 static int mskc_shutdown(device_t);
  257 static int mskc_setup_rambuffer(struct msk_softc *);
  258 static int mskc_suspend(device_t);
  259 static int mskc_resume(device_t);
  260 static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t);
  261 static void mskc_reset(struct msk_softc *);
  262 
  263 static int msk_probe(device_t);
  264 static int msk_attach(device_t);
  265 static int msk_detach(device_t);
  266 
  267 static void msk_tick(void *);
  268 static void msk_intr(void *);
  269 static void msk_intr_phy(struct msk_if_softc *);
  270 static void msk_intr_gmac(struct msk_if_softc *);
  271 static __inline void msk_rxput(struct msk_if_softc *);
  272 static int msk_handle_events(struct msk_softc *);
  273 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
  274 static void msk_intr_hwerr(struct msk_softc *);
  275 #ifndef __NO_STRICT_ALIGNMENT
  276 static __inline void msk_fixup_rx(struct mbuf *);
  277 #endif
  278 static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
  279 static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
  280 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
  281 static void msk_txeof(struct msk_if_softc *, int);
  282 static int msk_encap(struct msk_if_softc *, struct mbuf **);
  283 static void msk_start(struct ifnet *);
  284 static void msk_start_locked(struct ifnet *);
  285 static int msk_ioctl(struct ifnet *, u_long, caddr_t);
  286 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
  287 static void msk_set_rambuffer(struct msk_if_softc *);
  288 static void msk_set_tx_stfwd(struct msk_if_softc *);
  289 static void msk_init(void *);
  290 static void msk_init_locked(struct msk_if_softc *);
  291 static void msk_stop(struct msk_if_softc *);
  292 static void msk_watchdog(struct msk_if_softc *);
  293 static int msk_mediachange(struct ifnet *);
  294 static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
  295 static void msk_phy_power(struct msk_softc *, int);
  296 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  297 static int msk_status_dma_alloc(struct msk_softc *);
  298 static void msk_status_dma_free(struct msk_softc *);
  299 static int msk_txrx_dma_alloc(struct msk_if_softc *);
  300 static int msk_rx_dma_jalloc(struct msk_if_softc *);
  301 static void msk_txrx_dma_free(struct msk_if_softc *);
  302 static void msk_rx_dma_jfree(struct msk_if_softc *);
  303 static int msk_rx_fill(struct msk_if_softc *, int);
  304 static int msk_init_rx_ring(struct msk_if_softc *);
  305 static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
  306 static void msk_init_tx_ring(struct msk_if_softc *);
  307 static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
  308 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
  309 static int msk_newbuf(struct msk_if_softc *, int);
  310 static int msk_jumbo_newbuf(struct msk_if_softc *, int);
  311 
  312 static int msk_phy_readreg(struct msk_if_softc *, int, int);
  313 static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
  314 static int msk_miibus_readreg(device_t, int, int);
  315 static int msk_miibus_writereg(device_t, int, int, int);
  316 static void msk_miibus_statchg(device_t);
  317 
  318 static void msk_rxfilter(struct msk_if_softc *);
  319 static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
  320 
  321 static void msk_stats_clear(struct msk_if_softc *);
  322 static void msk_stats_update(struct msk_if_softc *);
  323 static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
  324 static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
  325 static void msk_sysctl_node(struct msk_if_softc *);
  326 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
  327 static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
  328 
  329 static device_method_t mskc_methods[] = {
  330         /* Device interface */
  331         DEVMETHOD(device_probe,         mskc_probe),
  332         DEVMETHOD(device_attach,        mskc_attach),
  333         DEVMETHOD(device_detach,        mskc_detach),
  334         DEVMETHOD(device_suspend,       mskc_suspend),
  335         DEVMETHOD(device_resume,        mskc_resume),
  336         DEVMETHOD(device_shutdown,      mskc_shutdown),
  337 
  338         DEVMETHOD(bus_get_dma_tag,      mskc_get_dma_tag),
  339 
  340         DEVMETHOD_END
  341 };
  342 
  343 static driver_t mskc_driver = {
  344         "mskc",
  345         mskc_methods,
  346         sizeof(struct msk_softc)
  347 };
  348 
  349 static devclass_t mskc_devclass;
  350 
  351 static device_method_t msk_methods[] = {
  352         /* Device interface */
  353         DEVMETHOD(device_probe,         msk_probe),
  354         DEVMETHOD(device_attach,        msk_attach),
  355         DEVMETHOD(device_detach,        msk_detach),
  356         DEVMETHOD(device_shutdown,      bus_generic_shutdown),
  357 
  358         /* MII interface */
  359         DEVMETHOD(miibus_readreg,       msk_miibus_readreg),
  360         DEVMETHOD(miibus_writereg,      msk_miibus_writereg),
  361         DEVMETHOD(miibus_statchg,       msk_miibus_statchg),
  362 
  363         DEVMETHOD_END
  364 };
  365 
  366 static driver_t msk_driver = {
  367         "msk",
  368         msk_methods,
  369         sizeof(struct msk_if_softc)
  370 };
  371 
  372 static devclass_t msk_devclass;
  373 
  374 DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, NULL, NULL);
  375 DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, NULL, NULL);
  376 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);
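
/*
 * Driver hierarchy: the mskc controller device attaches to the PCI bus,
 * one msk network-interface child is created under mskc for each port,
 * and a miibus PHY bus attaches to every msk instance.
 */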
  377 
  378 static struct resource_spec msk_res_spec_io[] = {
  379         { SYS_RES_IOPORT,       PCIR_BAR(1),    RF_ACTIVE },
  380         { -1,                   0,              0 }
  381 };
  382 
  383 static struct resource_spec msk_res_spec_mem[] = {
  384         { SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
  385         { -1,                   0,              0 }
  386 };
  387 
  388 static struct resource_spec msk_irq_spec_legacy[] = {
  389         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  390         { -1,                   0,              0 }
  391 };
  392 
  393 static struct resource_spec msk_irq_spec_msi[] = {
  394         { SYS_RES_IRQ,          1,              RF_ACTIVE },
  395         { -1,                   0,              0 }
  396 };
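
/*
 * Register access goes through either memory BAR 0 or I/O BAR 1, one of
 * which is chosen when the controller attaches.  The legacy INTx interrupt
 * is allocated at SYS_RES_IRQ rid 0, whereas an MSI vector, once
 * allocated, appears at rid 1.
 */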
  397 
  398 static int
  399 msk_miibus_readreg(device_t dev, int phy, int reg)
  400 {
  401         struct msk_if_softc *sc_if;
  402 
  403         sc_if = device_get_softc(dev);
  404 
  405         return (msk_phy_readreg(sc_if, phy, reg));
  406 }
  407 
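/*
 * PHY registers are reached indirectly through the GMAC's SMI interface:
 * a read writes the PHY and register addresses together with the
 * GM_SMI_CT_OP_RD opcode to GM_SMI_CTRL, polls until GM_SMI_CT_RD_VAL is
 * set, and then fetches the result from GM_SMI_DATA; a write loads
 * GM_SMI_DATA, kicks off the cycle through GM_SMI_CTRL, and polls
 * GM_SMI_CT_BUSY until it clears.
 */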
  408 static int
  409 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
  410 {
  411         struct msk_softc *sc;
  412         int i, val;
  413 
  414         sc = sc_if->msk_softc;
  415 
  416         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
  417             GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
  418 
  419         for (i = 0; i < MSK_TIMEOUT; i++) {
  420                 DELAY(1);
  421                 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
  422                 if ((val & GM_SMI_CT_RD_VAL) != 0) {
  423                         val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
  424                         break;
  425                 }
  426         }
  427 
  428         if (i == MSK_TIMEOUT) {
  429                 if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
  430                 val = 0;
  431         }
  432 
  433         return (val);
  434 }
  435 
  436 static int
  437 msk_miibus_writereg(device_t dev, int phy, int reg, int val)
  438 {
  439         struct msk_if_softc *sc_if;
  440 
  441         sc_if = device_get_softc(dev);
  442 
  443         return (msk_phy_writereg(sc_if, phy, reg, val));
  444 }
  445 
  446 static int
  447 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
  448 {
  449         struct msk_softc *sc;
  450         int i;
  451 
  452         sc = sc_if->msk_softc;
  453 
  454         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
  455         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
  456             GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
  457         for (i = 0; i < MSK_TIMEOUT; i++) {
  458                 DELAY(1);
  459                 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
  460                     GM_SMI_CT_BUSY) == 0)
  461                         break;
  462         }
  463         if (i == MSK_TIMEOUT)
  464                 if_printf(sc_if->msk_ifp, "phy write timeout\n");
  465 
  466         return (0);
  467 }
  468 
  469 static void
  470 msk_miibus_statchg(device_t dev)
  471 {
  472         struct msk_softc *sc;
  473         struct msk_if_softc *sc_if;
  474         struct mii_data *mii;
  475         struct ifnet *ifp;
  476         uint32_t gmac;
  477 
  478         sc_if = device_get_softc(dev);
  479         sc = sc_if->msk_softc;
  480 
  481         MSK_IF_LOCK_ASSERT(sc_if);
  482 
  483         mii = device_get_softc(sc_if->msk_miibus);
  484         ifp = sc_if->msk_ifp;
  485         if (mii == NULL || ifp == NULL ||
  486             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
  487                 return;
  488 
  489         sc_if->msk_flags &= ~MSK_FLAG_LINK;
  490         if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
  491             (IFM_AVALID | IFM_ACTIVE)) {
  492                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
  493                 case IFM_10_T:
  494                 case IFM_100_TX:
  495                         sc_if->msk_flags |= MSK_FLAG_LINK;
  496                         break;
  497                 case IFM_1000_T:
  498                 case IFM_1000_SX:
  499                 case IFM_1000_LX:
  500                 case IFM_1000_CX:
  501                         if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
  502                                 sc_if->msk_flags |= MSK_FLAG_LINK;
  503                         break;
  504                 default:
  505                         break;
  506                 }
  507         }
  508 
  509         if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
  510                 /* Enable Tx FIFO Underrun. */
  511                 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
  512                     GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
  513                 /*
  514                  * Because mii(4) notifies msk(4) when it detects a link
  515                  * state change, there is no need to enable automatic
  516                  * speed/flow-control/duplex updates.
  517                  */
  518                 gmac = GM_GPCR_AU_ALL_DIS;
  519                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
  520                 case IFM_1000_SX:
  521                 case IFM_1000_T:
  522                         gmac |= GM_GPCR_SPEED_1000;
  523                         break;
  524                 case IFM_100_TX:
  525                         gmac |= GM_GPCR_SPEED_100;
  526                         break;
  527                 case IFM_10_T:
  528                         break;
  529                 }
  530 
  531                 if ((IFM_OPTIONS(mii->mii_media_active) &
  532                     IFM_ETH_RXPAUSE) == 0)
  533                         gmac |= GM_GPCR_FC_RX_DIS;
  534                 if ((IFM_OPTIONS(mii->mii_media_active) &
  535                      IFM_ETH_TXPAUSE) == 0)
  536                         gmac |= GM_GPCR_FC_TX_DIS;
  537                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
  538                         gmac |= GM_GPCR_DUP_FULL;
  539                 else
  540                         gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
  541                 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
  542                 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
  543                 /* Read back to make sure the write has taken effect. */
  544                 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
  545                 gmac = GMC_PAUSE_OFF;
  546                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
  547                         if ((IFM_OPTIONS(mii->mii_media_active) &
  548                             IFM_ETH_RXPAUSE) != 0)
  549                                 gmac = GMC_PAUSE_ON;
  550                 }
  551                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
  552 
  553                 /* Enable PHY interrupt for FIFO underrun/overflow. */
  554                 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
  555                     PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
  556         } else {
  557                 /*
  558                  * Link state changed to down.
  559                  * Disable PHY interrupts.
  560                  */
  561                 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
  562                 /* Disable Rx/Tx MAC. */
  563                 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
  564                 if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
  565                         gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
  566                         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
  567                         /* Read back to make sure the write has taken effect. */
  568                         GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
  569                 }
  570         }
  571 }
  572 
  573 static void
  574 msk_rxfilter(struct msk_if_softc *sc_if)
  575 {
  576         struct msk_softc *sc;
  577         struct ifnet *ifp;
  578         struct ifmultiaddr *ifma;
  579         uint32_t mchash[2];
  580         uint32_t crc;
  581         uint16_t mode;
  582 
  583         sc = sc_if->msk_softc;
  584 
  585         MSK_IF_LOCK_ASSERT(sc_if);
  586 
  587         ifp = sc_if->msk_ifp;
  588 
  589         bzero(mchash, sizeof(mchash));
  590         mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
  591         if ((ifp->if_flags & IFF_PROMISC) != 0)
  592                 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
  593         else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
  594                 mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
  595                 mchash[0] = 0xffff;
  596                 mchash[1] = 0xffff;
  597         } else {
  598                 mode |= GM_RXCR_UCF_ENA;
  599                 if_maddr_rlock(ifp);
  600                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
  601                         if (ifma->ifma_addr->sa_family != AF_LINK)
  602                                 continue;
  603                         crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  604                             ifma->ifma_addr), ETHER_ADDR_LEN);
  605                         /* Just want the 6 least significant bits. */
  606                         crc &= 0x3f;
  607                         /* Set the corresponding bit in the hash table. */
  608                         mchash[crc >> 5] |= 1 << (crc & 0x1f);
  609                 }
  610                 if_maddr_runlock(ifp);
  611                 if (mchash[0] != 0 || mchash[1] != 0)
  612                         mode |= GM_RXCR_MCF_ENA;
  613         }
  614 
  615         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
  616             mchash[0] & 0xffff);
  617         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
  618             (mchash[0] >> 16) & 0xffff);
  619         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
  620             mchash[1] & 0xffff);
  621         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
  622             (mchash[1] >> 16) & 0xffff);
  623         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
  624 }
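
/*
 * The multicast filter programmed above is a 64-bit hash: the low six bits
 * of the big-endian CRC of each address select a single bit, with bit 5
 * picking mchash[0] or mchash[1].  For example, a CRC whose low six bits
 * are 0x2a (42) sets bit 10 of mchash[1], which lands in GM_MC_ADDR_H3.
 */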
  625 
  626 static void
  627 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
  628 {
  629         struct msk_softc *sc;
  630 
  631         sc = sc_if->msk_softc;
  632         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
  633                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
  634                     RX_VLAN_STRIP_ON);
  635                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
  636                     TX_VLAN_TAG_ON);
  637         } else {
  638                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
  639                     RX_VLAN_STRIP_OFF);
  640                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
  641                     TX_VLAN_TAG_OFF);
  642         }
  643 }
  644 
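/*
 * Once the Rx ring has been seeded with OP_TCPSTART checksum-control list
 * elements, wait for the prefetch unit to consume them and then put a real
 * receive buffer in their place so the ring stays fully populated.
 */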
  645 static int
  646 msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
  647 {
  648         uint16_t idx;
  649         int i;
  650 
  651         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
  652             (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
  653                 /* Wait until the controller has executed the OP_TCPSTART command. */
  654                 for (i = 100; i > 0; i--) {
  655                         DELAY(100);
  656                         idx = CSR_READ_2(sc_if->msk_softc,
  657                             Y2_PREF_Q_ADDR(sc_if->msk_rxq,
  658                             PREF_UNIT_GET_IDX_REG));
  659                         if (idx != 0)
  660                                 break;
  661                 }
  662                 if (i == 0) {
  663                         device_printf(sc_if->msk_if_dev,
  664                             "prefetch unit stuck?\n");
  665                         return (ETIMEDOUT);
  666                 }
  667                 /*
  668                  * Replace the consumed LE with a fresh buffer.  This could
  669                  * be done in the Rx handler, but we don't want to add
  670                  * special-case code to the fast path.
  671                  */
  672                 if (jumbo > 0) {
  673                         if (msk_jumbo_newbuf(sc_if, 0) != 0)
  674                                 return (ENOBUFS);
  675                         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
  676                             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
  677                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  678                 } else {
  679                         if (msk_newbuf(sc_if, 0) != 0)
  680                                 return (ENOBUFS);
  681                         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
  682                             sc_if->msk_cdata.msk_rx_ring_map,
  683                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  684                 }
  685                 sc_if->msk_cdata.msk_rx_prod = 0;
  686                 CSR_WRITE_2(sc_if->msk_softc,
  687                     Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
  688                     sc_if->msk_cdata.msk_rx_prod);
  689         }
  690         return (0);
  691 }
  692 
  693 static int
  694 msk_init_rx_ring(struct msk_if_softc *sc_if)
  695 {
  696         struct msk_ring_data *rd;
  697         struct msk_rxdesc *rxd;
  698         int i, nbuf, prod;
  699 
  700         MSK_IF_LOCK_ASSERT(sc_if);
  701 
  702         sc_if->msk_cdata.msk_rx_cons = 0;
  703         sc_if->msk_cdata.msk_rx_prod = 0;
  704         sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
  705 
  706         rd = &sc_if->msk_rdata;
  707         bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
  708         for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
  709                 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
  710                 rxd->rx_m = NULL;
  711                 rxd->rx_le = &rd->msk_rx_ring[prod];
  712                 MSK_INC(prod, MSK_RX_RING_CNT);
  713         }
  714         nbuf = MSK_RX_BUF_CNT;
  715         prod = 0;
  716         /* Tell the controller how to compute the Rx checksum. */
  717         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
  718             (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
  719 #ifdef MSK_64BIT_DMA
  720                 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
  721                 rxd->rx_m = NULL;
  722                 rxd->rx_le = &rd->msk_rx_ring[prod];
  723                 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
  724                     ETHER_HDR_LEN);
  725                 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
  726                 MSK_INC(prod, MSK_RX_RING_CNT);
  727                 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
  728 #endif
  729                 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
  730                 rxd->rx_m = NULL;
  731                 rxd->rx_le = &rd->msk_rx_ring[prod];
  732                 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
  733                     ETHER_HDR_LEN);
  734                 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
  735                 MSK_INC(prod, MSK_RX_RING_CNT);
  736                 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
  737                 nbuf--;
  738         }
  739         for (i = 0; i < nbuf; i++) {
  740                 if (msk_newbuf(sc_if, prod) != 0)
  741                         return (ENOBUFS);
  742                 MSK_RX_INC(prod, MSK_RX_RING_CNT);
  743         }
  744 
  745         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
  746             sc_if->msk_cdata.msk_rx_ring_map,
  747             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  748 
  749         /* Update prefetch unit. */
  750         sc_if->msk_cdata.msk_rx_prod = prod;
  751         CSR_WRITE_2(sc_if->msk_softc,
  752             Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
  753             (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) %
  754             MSK_RX_RING_CNT);
  755         if (msk_rx_fill(sc_if, 0) != 0)
  756                 return (ENOBUFS);
  757         return (0);
  758 }
  759 
  760 static int
  761 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
  762 {
  763         struct msk_ring_data *rd;
  764         struct msk_rxdesc *rxd;
  765         int i, nbuf, prod;
  766 
  767         MSK_IF_LOCK_ASSERT(sc_if);
  768 
  769         sc_if->msk_cdata.msk_rx_cons = 0;
  770         sc_if->msk_cdata.msk_rx_prod = 0;
  771         sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
  772 
  773         rd = &sc_if->msk_rdata;
  774         bzero(rd->msk_jumbo_rx_ring,
  775             sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
  776         for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
  777                 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
  778                 rxd->rx_m = NULL;
  779                 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
  780                 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
  781         }
  782         nbuf = MSK_RX_BUF_CNT;
  783         prod = 0;
  784         /* Tell the controller how to compute the Rx checksum. */
  785         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
  786             (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
  787 #ifdef MSK_64BIT_DMA
  788                 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
  789                 rxd->rx_m = NULL;
  790                 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
  791                 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
  792                     ETHER_HDR_LEN);
  793                 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
  794                 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
  795                 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
  796 #endif
  797                 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
  798                 rxd->rx_m = NULL;
  799                 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
  800                 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
  801                     ETHER_HDR_LEN);
  802                 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
  803                 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
  804                 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
  805                 nbuf--;
  806         }
  807         for (i = 0; i < nbuf; i++) {
  808                 if (msk_jumbo_newbuf(sc_if, prod) != 0)
  809                         return (ENOBUFS);
  810                 MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT);
  811         }
  812 
  813         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
  814             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
  815             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  816 
  817         /* Update prefetch unit. */
  818         sc_if->msk_cdata.msk_rx_prod = prod;
  819         CSR_WRITE_2(sc_if->msk_softc,
  820             Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
  821             (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) %
  822             MSK_JUMBO_RX_RING_CNT);
  823         if (msk_rx_fill(sc_if, 1) != 0)
  824                 return (ENOBUFS);
  825         return (0);
  826 }
  827 
  828 static void
  829 msk_init_tx_ring(struct msk_if_softc *sc_if)
  830 {
  831         struct msk_ring_data *rd;
  832         struct msk_txdesc *txd;
  833         int i;
  834 
  835         sc_if->msk_cdata.msk_tso_mtu = 0;
  836         sc_if->msk_cdata.msk_last_csum = 0;
  837         sc_if->msk_cdata.msk_tx_prod = 0;
  838         sc_if->msk_cdata.msk_tx_cons = 0;
  839         sc_if->msk_cdata.msk_tx_cnt = 0;
  840         sc_if->msk_cdata.msk_tx_high_addr = 0;
  841 
  842         rd = &sc_if->msk_rdata;
  843         bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
  844         for (i = 0; i < MSK_TX_RING_CNT; i++) {
  845                 txd = &sc_if->msk_cdata.msk_txdesc[i];
  846                 txd->tx_m = NULL;
  847                 txd->tx_le = &rd->msk_tx_ring[i];
  848         }
  849 
  850         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
  851             sc_if->msk_cdata.msk_tx_ring_map,
  852             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  853 }
  854 
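/*
 * Re-arm a receive descriptor with the mbuf it already holds, returning
 * ownership to the hardware without allocating a replacement buffer,
 * e.g. when the received frame is being discarded.
 */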
  855 static __inline void
  856 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
  857 {
  858         struct msk_rx_desc *rx_le;
  859         struct msk_rxdesc *rxd;
  860         struct mbuf *m;
  861 
  862 #ifdef MSK_64BIT_DMA
  863         rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  864         rx_le = rxd->rx_le;
  865         rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
  866         MSK_INC(idx, MSK_RX_RING_CNT);
  867 #endif
  868         rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  869         m = rxd->rx_m;
  870         rx_le = rxd->rx_le;
  871         rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
  872 }
  873 
  874 static __inline void
  875 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
  876 {
  877         struct msk_rx_desc *rx_le;
  878         struct msk_rxdesc *rxd;
  879         struct mbuf *m;
  880 
  881 #ifdef MSK_64BIT_DMA
  882         rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
  883         rx_le = rxd->rx_le;
  884         rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
  885         MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
  886 #endif
  887         rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
  888         m = rxd->rx_m;
  889         rx_le = rxd->rx_le;
  890         rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
  891 }
  892 
  893 static int
  894 msk_newbuf(struct msk_if_softc *sc_if, int idx)
  895 {
  896         struct msk_rx_desc *rx_le;
  897         struct msk_rxdesc *rxd;
  898         struct mbuf *m;
  899         bus_dma_segment_t segs[1];
  900         bus_dmamap_t map;
  901         int nsegs;
  902 
  903         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
  904         if (m == NULL)
  905                 return (ENOBUFS);
  906 
  907         m->m_len = m->m_pkthdr.len = MCLBYTES;
  908         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
  909                 m_adj(m, ETHER_ALIGN);
  910 #ifndef __NO_STRICT_ALIGNMENT
  911         else
  912                 m_adj(m, MSK_RX_BUF_ALIGN);
  913 #endif
  914 
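        /*
         * Load the new mbuf into the spare DMA map first so that a mapping
         * failure leaves the currently posted buffer untouched; on success
         * the descriptor's map and the spare map are swapped below.
         */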
  915         if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
  916             sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
  917             BUS_DMA_NOWAIT) != 0) {
  918                 m_freem(m);
  919                 return (ENOBUFS);
  920         }
  921         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
  922 
  923         rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  924 #ifdef MSK_64BIT_DMA
  925         rx_le = rxd->rx_le;
  926         rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
  927         rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
  928         MSK_INC(idx, MSK_RX_RING_CNT);
  929         rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  930 #endif
  931         if (rxd->rx_m != NULL) {
  932                 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
  933                     BUS_DMASYNC_POSTREAD);
  934                 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
  935                 rxd->rx_m = NULL;
  936         }
  937         map = rxd->rx_dmamap;
  938         rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
  939         sc_if->msk_cdata.msk_rx_sparemap = map;
  940         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
  941             BUS_DMASYNC_PREREAD);
  942         rxd->rx_m = m;
  943         rx_le = rxd->rx_le;
  944         rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
  945         rx_le->msk_control =
  946             htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
  947 
  948         return (0);
  949 }
  950 
  951 static int
  952 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
  953 {
  954         struct msk_rx_desc *rx_le;
  955         struct msk_rxdesc *rxd;
  956         struct mbuf *m;
  957         bus_dma_segment_t segs[1];
  958         bus_dmamap_t map;
  959         int nsegs;
  960 
  961         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
  962         if (m == NULL)
  963                 return (ENOBUFS);
  964         if ((m->m_flags & M_EXT) == 0) {
  965                 m_freem(m);
  966                 return (ENOBUFS);
  967         }
  968         m->m_len = m->m_pkthdr.len = MJUM9BYTES;
  969         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
  970                 m_adj(m, ETHER_ALIGN);
  971 #ifndef __NO_STRICT_ALIGNMENT
  972         else
  973                 m_adj(m, MSK_RX_BUF_ALIGN);
  974 #endif
  975 
  976         if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
  977             sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
  978             BUS_DMA_NOWAIT) != 0) {
  979                 m_freem(m);
  980                 return (ENOBUFS);
  981         }
  982         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
  983 
  984         rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
  985 #ifdef MSK_64BIT_DMA
  986         rx_le = rxd->rx_le;
  987         rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
  988         rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
  989         MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
  990         rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
  991 #endif
  992         if (rxd->rx_m != NULL) {
  993                 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
  994                     rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
  995                 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
  996                     rxd->rx_dmamap);
  997                 rxd->rx_m = NULL;
  998         }
  999         map = rxd->rx_dmamap;
 1000         rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
 1001         sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
 1002         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
 1003             BUS_DMASYNC_PREREAD);
 1004         rxd->rx_m = m;
 1005         rx_le = rxd->rx_le;
 1006         rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
 1007         rx_le->msk_control =
 1008             htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
 1009 
 1010         return (0);
 1011 }
 1012 
 1013 /*
 1014  * Set media options.
 1015  */
 1016 static int
 1017 msk_mediachange(struct ifnet *ifp)
 1018 {
 1019         struct msk_if_softc *sc_if;
 1020         struct mii_data *mii;
 1021         int error;
 1022 
 1023         sc_if = ifp->if_softc;
 1024 
 1025         MSK_IF_LOCK(sc_if);
 1026         mii = device_get_softc(sc_if->msk_miibus);
 1027         error = mii_mediachg(mii);
 1028         MSK_IF_UNLOCK(sc_if);
 1029 
 1030         return (error);
 1031 }
 1032 
 1033 /*
 1034  * Report current media status.
 1035  */
 1036 static void
 1037 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1038 {
 1039         struct msk_if_softc *sc_if;
 1040         struct mii_data *mii;
 1041 
 1042         sc_if = ifp->if_softc;
 1043         MSK_IF_LOCK(sc_if);
 1044         if ((ifp->if_flags & IFF_UP) == 0) {
 1045                 MSK_IF_UNLOCK(sc_if);
 1046                 return;
 1047         }
 1048         mii = device_get_softc(sc_if->msk_miibus);
 1049 
 1050         mii_pollstat(mii);
 1051         ifmr->ifm_active = mii->mii_media_active;
 1052         ifmr->ifm_status = mii->mii_media_status;
 1053         MSK_IF_UNLOCK(sc_if);
 1054 }
 1055 
 1056 static int
 1057 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 1058 {
 1059         struct msk_if_softc *sc_if;
 1060         struct ifreq *ifr;
 1061         struct mii_data *mii;
 1062         int error, mask, reinit;
 1063 
 1064         sc_if = ifp->if_softc;
 1065         ifr = (struct ifreq *)data;
 1066         error = 0;
 1067 
 1068         switch(command) {
 1069         case SIOCSIFMTU:
 1070                 MSK_IF_LOCK(sc_if);
 1071                 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
 1072                         error = EINVAL;
 1073                 else if (ifp->if_mtu != ifr->ifr_mtu) {
 1074                         if (ifr->ifr_mtu > ETHERMTU) {
 1075                                 if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
 1076                                         error = EINVAL;
 1077                                         MSK_IF_UNLOCK(sc_if);
 1078                                         break;
 1079                                 }
 1080                                 if ((sc_if->msk_flags &
 1081                                     MSK_FLAG_JUMBO_NOCSUM) != 0) {
 1082                                         ifp->if_hwassist &=
 1083                                             ~(MSK_CSUM_FEATURES | CSUM_TSO);
 1084                                         ifp->if_capenable &=
 1085                                             ~(IFCAP_TSO4 | IFCAP_TXCSUM);
 1086                                         VLAN_CAPABILITIES(ifp);
 1087                                 }
 1088                         }
 1089                         ifp->if_mtu = ifr->ifr_mtu;
 1090                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1091                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1092                                 msk_init_locked(sc_if);
 1093                         }
 1094                 }
 1095                 MSK_IF_UNLOCK(sc_if);
 1096                 break;
 1097         case SIOCSIFFLAGS:
 1098                 MSK_IF_LOCK(sc_if);
 1099                 if ((ifp->if_flags & IFF_UP) != 0) {
 1100                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1101                             ((ifp->if_flags ^ sc_if->msk_if_flags) &
 1102                             (IFF_PROMISC | IFF_ALLMULTI)) != 0)
 1103                                 msk_rxfilter(sc_if);
 1104                         else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
 1105                                 msk_init_locked(sc_if);
 1106                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1107                         msk_stop(sc_if);
 1108                 sc_if->msk_if_flags = ifp->if_flags;
 1109                 MSK_IF_UNLOCK(sc_if);
 1110                 break;
 1111         case SIOCADDMULTI:
 1112         case SIOCDELMULTI:
 1113                 MSK_IF_LOCK(sc_if);
 1114                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1115                         msk_rxfilter(sc_if);
 1116                 MSK_IF_UNLOCK(sc_if);
 1117                 break;
 1118         case SIOCGIFMEDIA:
 1119         case SIOCSIFMEDIA:
 1120                 mii = device_get_softc(sc_if->msk_miibus);
 1121                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
 1122                 break;
 1123         case SIOCSIFCAP:
 1124                 reinit = 0;
 1125                 MSK_IF_LOCK(sc_if);
 1126                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 1127                 if ((mask & IFCAP_TXCSUM) != 0 &&
 1128                     (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
 1129                         ifp->if_capenable ^= IFCAP_TXCSUM;
 1130                         if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
 1131                                 ifp->if_hwassist |= MSK_CSUM_FEATURES;
 1132                         else
 1133                                 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
 1134                 }
 1135                 if ((mask & IFCAP_RXCSUM) != 0 &&
 1136                     (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
 1137                         ifp->if_capenable ^= IFCAP_RXCSUM;
 1138                         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
 1139                                 reinit = 1;
 1140                 }
 1141                 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
 1142                     (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
 1143                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 1144                 if ((mask & IFCAP_TSO4) != 0 &&
 1145                     (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
 1146                         ifp->if_capenable ^= IFCAP_TSO4;
 1147                         if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
 1148                                 ifp->if_hwassist |= CSUM_TSO;
 1149                         else
 1150                                 ifp->if_hwassist &= ~CSUM_TSO;
 1151                 }
 1152                 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
 1153                     (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
 1154                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
 1155                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 1156                     (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
 1157                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 1158                         if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
 1159                                 ifp->if_capenable &=
 1160                                     ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
 1161                         msk_setvlan(sc_if, ifp);
 1162                 }
 1163                 if (ifp->if_mtu > ETHERMTU &&
 1164                     (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
 1165                         ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
 1166                         ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
 1167                 }
 1168                 VLAN_CAPABILITIES(ifp);
 1169                 if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1170                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1171                         msk_init_locked(sc_if);
 1172                 }
 1173                 MSK_IF_UNLOCK(sc_if);
 1174                 break;
 1175         default:
 1176                 error = ether_ioctl(ifp, command, data);
 1177                 break;
 1178         }
 1179 
 1180         return (error);
 1181 }
 1182 
 1183 static int
 1184 mskc_probe(device_t dev)
 1185 {
 1186         const struct msk_product *mp;
 1187         uint16_t vendor, devid;
 1188         int i;
 1189 
 1190         vendor = pci_get_vendor(dev);
 1191         devid = pci_get_device(dev);
 1192         mp = msk_products;
 1193         for (i = 0; i < nitems(msk_products); i++, mp++) {
 1194                 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
 1195                         device_set_desc(dev, mp->msk_name);
 1196                         return (BUS_PROBE_DEFAULT);
 1197                 }
 1198         }
 1199 
 1200         return (ENXIO);
 1201 }
 1202 
 1203 static int
 1204 mskc_setup_rambuffer(struct msk_softc *sc)
 1205 {
 1206         int next;
 1207         int i;
 1208 
 1209         /* Get adapter SRAM size. */
 1210         sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
 1211         if (bootverbose)
 1212                 device_printf(sc->msk_dev,
 1213                     "RAM buffer size : %dKB\n", sc->msk_ramsize);
 1214         if (sc->msk_ramsize == 0)
 1215                 return (0);
 1216 
 1217         sc->msk_pflags |= MSK_FLAG_RAMBUF;
 1218         /*
 1219          * Give the receiver 2/3 of the memory, rounded down to a
 1220          * multiple of 1024.  The Tx/Rx RAM buffer sizes of the Yukon II
 1221          * must be multiples of 1024.
 1222          */
 1223         sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
 1224         sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
 1225         for (i = 0, next = 0; i < sc->msk_num_port; i++) {
 1226                 sc->msk_rxqstart[i] = next;
 1227                 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
 1228                 next = sc->msk_rxqend[i] + 1;
 1229                 sc->msk_txqstart[i] = next;
 1230                 sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
 1231                 next = sc->msk_txqend[i] + 1;
 1232                 if (bootverbose) {
 1233                         device_printf(sc->msk_dev,
 1234                             "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
 1235                             sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
 1236                             sc->msk_rxqend[i]);
 1237                         device_printf(sc->msk_dev,
 1238                             "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
 1239                             sc->msk_txqsize / 1024, sc->msk_txqstart[i],
 1240                             sc->msk_txqend[i]);
 1241                 }
 1242         }
 1243 
 1244         return (0);
 1245 }
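
/*
 * As an illustrative example, a single-port controller reporting 48KB of
 * SRAM would get msk_rxqsize = 32768 and msk_txqsize = 16384 bytes, i.e.
 * an Rx queue at 0x0000-0x7fff and a Tx queue at 0x8000-0xbfff.
 */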
 1246 
 1247 static void
 1248 msk_phy_power(struct msk_softc *sc, int mode)
 1249 {
 1250         uint32_t our, val;
 1251         int i;
 1252 
 1253         switch (mode) {
 1254         case MSK_PHY_POWERUP:
 1255                 /* Switch power to VCC (workaround for the VAUX problem). */
 1256                 CSR_WRITE_1(sc, B0_POWER_CTRL,
 1257                     PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
 1258                 /* Disable Core Clock Division, set Clock Select to 0. */
 1259                 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
 1260 
 1261                 val = 0;
 1262                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1263                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1264                         /* Enable bits are inverted. */
 1265                         val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 1266                               Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 1267                               Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
 1268                 }
 1269                 /*
 1270                  * Enable PCI & Core Clock, enable clock gating for both Links.
 1271                  */
 1272                 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
 1273 
 1274                 our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
 1275                 our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
 1276                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
 1277                         if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1278                                 /* Deassert Low Power for 1st PHY. */
 1279                                 our |= PCI_Y2_PHY1_COMA;
 1280                                 if (sc->msk_num_port > 1)
 1281                                         our |= PCI_Y2_PHY2_COMA;
 1282                         }
 1283                 }
 1284                 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
 1285                     sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1286                     sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
 1287                         val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
 1288                         val &= (PCI_FORCE_ASPM_REQUEST |
 1289                             PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
 1290                             PCI_ASPM_CLKRUN_REQUEST);
 1291                         /* Set all bits to 0 except bits 15..12. */
 1292                         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
 1293                         val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
 1294                         val &= PCI_CTL_TIM_VMAIN_AV_MSK;
 1295                         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
 1296                         CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
 1297                         CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
 1298                         /*
 1299                          * Disable status race, workaround for
 1300                          * Yukon EC Ultra & Yukon EX.
 1301                          */
 1302                         val = CSR_READ_4(sc, B2_GP_IO);
 1303                         val |= GLB_GPIO_STAT_RACE_DIS;
 1304                         CSR_WRITE_4(sc, B2_GP_IO, val);
 1305                         CSR_READ_4(sc, B2_GP_IO);
 1306                 }
 1307                 /* Release PHY from PowerDown/COMA mode. */
 1308                 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);
 1309 
 1310                 for (i = 0; i < sc->msk_num_port; i++) {
 1311                         CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
 1312                             GMLC_RST_SET);
 1313                         CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
 1314                             GMLC_RST_CLR);
 1315                 }
 1316                 break;
 1317         case MSK_PHY_POWERDOWN:
 1318                 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
 1319                 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
 1320                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1321                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1322                         val &= ~PCI_Y2_PHY1_COMA;
 1323                         if (sc->msk_num_port > 1)
 1324                                 val &= ~PCI_Y2_PHY2_COMA;
 1325                 }
 1326                 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
 1327 
 1328                 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 1329                       Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 1330                       Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
 1331                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1332                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1333                         /* Enable bits are inverted. */
 1334                         val = 0;
 1335                 }
 1336                 /*
 1337                  * Disable PCI & Core Clock, disable clock gating for
 1338                  * both Links.
 1339                  */
 1340                 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
 1341                 CSR_WRITE_1(sc, B0_POWER_CTRL,
 1342                     PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
 1343                 break;
 1344         default:
 1345                 break;
 1346         }
 1347 }
 1348 
 1349 static void
 1350 mskc_reset(struct msk_softc *sc)
 1351 {
 1352         bus_addr_t addr;
 1353         uint16_t status;
 1354         uint32_t val;
 1355         int i, initram;
 1356 
 1357         /* Disable ASF. */
 1358         if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
 1359             sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
 1360                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1361                     sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 1362                         CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
 1363                         status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
 1364                         /* Clear AHB bridge & microcontroller reset. */
 1365                         status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
 1366                             Y2_ASF_HCU_CCSR_CPU_RST_MODE);
 1367                         /* Clear ASF microcontroller state. */
 1368                         status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
 1369                         status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
 1370                         CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
 1371                         CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
 1372                 } else
 1373                         CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
 1374                 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
 1375                 /*
 1376                  * Since we disabled ASF, S/W reset is required for
 1377                  * Power Management.
 1378                  */
 1379                 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 1380                 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1381         }
 1382 
 1383         /* Clear all error bits in the PCI status register. */
 1384         status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 1385         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1386 
 1387         pci_write_config(sc->msk_dev, PCIR_STATUS, status |
 1388             PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 1389             PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
 1390         CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
 1391 
 1392         switch (sc->msk_bustype) {
 1393         case MSK_PEX_BUS:
 1394                 /* Clear all PEX errors. */
 1395                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 1396                 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 1397                 if ((val & PEX_RX_OV) != 0) {
 1398                         sc->msk_intrmask &= ~Y2_IS_HW_ERR;
 1399                         sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 1400                 }
 1401                 break;
 1402         case MSK_PCI_BUS:
 1403         case MSK_PCIX_BUS:
 1404                 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
 1405                 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
 1406                 if (val == 0)
 1407                         pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
 1408                 if (sc->msk_bustype == MSK_PCIX_BUS) {
 1409                         /* Set Cache Line Size opt. */
 1410                         val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
 1411                         val |= PCI_CLS_OPT;
 1412                         pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
 1413                 }
 1414                 break;
 1415         }
 1416         /* Set PHY power state. */
 1417         msk_phy_power(sc, MSK_PHY_POWERUP);
 1418 
 1419         /* Reset GPHY/GMAC Control */
 1420         for (i = 0; i < sc->msk_num_port; i++) {
 1421                 /* GPHY Control reset. */
 1422                 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
 1423                 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
 1424                 /* GMAC Control reset. */
 1425                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
 1426                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
 1427                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
 1428                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1429                     sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
 1430                         CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
 1431                             GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 1432                             GMC_BYP_RETR_ON);
 1433         }
 1434 
 1435         if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
 1436             sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
 1437                 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
 1438         if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
 1439                 /* Disable PCIe PHY powerdown (reg 0x80, bit 7). */
 1440                 CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
 1441         }
 1442         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1443 
 1444         /* LED On. */
 1445         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
 1446 
 1447         /* Clear TWSI IRQ. */
 1448         CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
 1449 
 1450         /* Turn off hardware timer. */
 1451         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
 1452         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
 1453 
 1454         /* Turn off descriptor polling. */
 1455         CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
 1456 
 1457         /* Turn off time stamps. */
 1458         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
 1459         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 1460 
 1461         initram = 0;
 1462         if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
 1463             sc->msk_hw_id == CHIP_ID_YUKON_EC ||
 1464             sc->msk_hw_id == CHIP_ID_YUKON_FE)
 1465                 initram++;
 1466 
 1467         /* Configure timeout values. */
 1468         for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
 1469                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
 1470                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
 1471                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
 1472                     MSK_RI_TO_53);
 1473                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
 1474                     MSK_RI_TO_53);
 1475                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
 1476                     MSK_RI_TO_53);
 1477                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
 1478                     MSK_RI_TO_53);
 1479                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
 1480                     MSK_RI_TO_53);
 1481                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
 1482                     MSK_RI_TO_53);
 1483                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
 1484                     MSK_RI_TO_53);
 1485                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
 1486                     MSK_RI_TO_53);
 1487                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
 1488                     MSK_RI_TO_53);
 1489                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
 1490                     MSK_RI_TO_53);
 1491                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
 1492                     MSK_RI_TO_53);
 1493                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
 1494                     MSK_RI_TO_53);
 1495         }
 1496 
 1497         /* Disable all interrupts. */
 1498         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 1499         CSR_READ_4(sc, B0_HWE_IMSK);
 1500         CSR_WRITE_4(sc, B0_IMSK, 0);
 1501         CSR_READ_4(sc, B0_IMSK);
 1502 
 1503         /*
 1504          * On dual-port PCI-X cards, there is a problem where status
 1505          * updates can be received out of order due to split transactions.
 1506          */
 1507         if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
 1508                 uint16_t pcix_cmd;
 1509 
 1510                 pcix_cmd = pci_read_config(sc->msk_dev,
 1511                     sc->msk_pcixcap + PCIXR_COMMAND, 2);
 1512                 /* Clear Max Outstanding Split Transactions. */
 1513                 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
 1514                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1515                 pci_write_config(sc->msk_dev,
 1516                     sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
 1517                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1518         }
 1519         if (sc->msk_expcap != 0) {
 1520                 /* Change Max. Read Request Size to 2048 bytes. */
 1521                 if (pci_get_max_read_req(sc->msk_dev) == 512)
 1522                         pci_set_max_read_req(sc->msk_dev, 2048);
 1523         }
 1524 
 1525         /* Clear status list. */
 1526         bzero(sc->msk_stat_ring,
 1527             sizeof(struct msk_stat_desc) * sc->msk_stat_count);
 1528         sc->msk_stat_cons = 0;
 1529         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 1530             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1531         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
 1532         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
 1533         /* Set the status list base address. */
 1534         addr = sc->msk_stat_ring_paddr;
 1535         CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
 1536         CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
 1537         /* Set the status list last index. */
 1538         CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1);
 1539         if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
 1540             sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
 1541                 /* WA for dev. #4.3 */
 1542                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
 1543                 /* WA for dev. #4.18 */
 1544                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
 1545                 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
 1546         } else {
 1547                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
 1548                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
 1549                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1550                     sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
 1551                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
 1552                 else
 1553                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
 1554                 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
 1555         }
 1556         /*
 1557          * Use default values for STAT_ISR_TIMER_INI and STAT_LEV_TIMER_INI.
 1558          */
 1559         CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
 1560 
 1561         /* Enable status unit. */
 1562         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
 1563 
 1564         CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
 1565         CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
 1566         CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
 1567 }
 1568 
 1569 static int
 1570 msk_probe(device_t dev)
 1571 {
 1572         struct msk_softc *sc;
 1573         char desc[100];
 1574 
 1575         sc = device_get_softc(device_get_parent(dev));
 1576         /*
 1577          * Not much to do here. We always know there will be
 1578          * at least one GMAC present, and if there are two,
 1579          * mskc_attach() will create a second device instance
 1580          * for us.
 1581          */
 1582         snprintf(desc, sizeof(desc),
 1583             "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
 1584             model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
 1585             sc->msk_hw_rev);
 1586         device_set_desc_copy(dev, desc);
 1587 
 1588         return (BUS_PROBE_DEFAULT);
 1589 }
 1590 
 1591 static int
 1592 msk_attach(device_t dev)
 1593 {
 1594         struct msk_softc *sc;
 1595         struct msk_if_softc *sc_if;
 1596         struct ifnet *ifp;
 1597         struct msk_mii_data *mmd;
 1598         int i, port, error;
 1599         uint8_t eaddr[6];
 1600 
 1601         if (dev == NULL)
 1602                 return (EINVAL);
 1603 
 1604         error = 0;
 1605         sc_if = device_get_softc(dev);
 1606         sc = device_get_softc(device_get_parent(dev));
 1607         mmd = device_get_ivars(dev);
 1608         port = mmd->port;
 1609 
 1610         sc_if->msk_if_dev = dev;
 1611         sc_if->msk_port = port;
 1612         sc_if->msk_softc = sc;
 1613         sc_if->msk_flags = sc->msk_pflags;
 1614         sc->msk_if[port] = sc_if;
 1615         /* Setup Tx/Rx queue register offsets. */
 1616         if (port == MSK_PORT_A) {
 1617                 sc_if->msk_txq = Q_XA1;
 1618                 sc_if->msk_txsq = Q_XS1;
 1619                 sc_if->msk_rxq = Q_R1;
 1620         } else {
 1621                 sc_if->msk_txq = Q_XA2;
 1622                 sc_if->msk_txsq = Q_XS2;
 1623                 sc_if->msk_rxq = Q_R2;
 1624         }
 1625 
 1626         callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
 1627         msk_sysctl_node(sc_if);
 1628 
 1629         if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
 1630                 goto fail;
 1631         msk_rx_dma_jalloc(sc_if);
 1632 
 1633         ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
 1634         if (ifp == NULL) {
 1635                 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
 1636                 error = ENOSPC;
 1637                 goto fail;
 1638         }
 1639         ifp->if_softc = sc_if;
 1640         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1641         ifp->if_mtu = ETHERMTU;
 1642         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1643         ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
 1644         /*
 1645          * Enable Rx checksum offloading: on old-format controllers except
 1646          * Yukon XL, and on DESCV2 controllers without MSK_FLAG_NORX_CSUM.
 1647          */
 1648         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 1649             sc->msk_hw_id != CHIP_ID_YUKON_XL)
 1650                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1651         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
 1652             (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
 1653                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1654         ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
 1655         ifp->if_capenable = ifp->if_capabilities;
 1656         ifp->if_ioctl = msk_ioctl;
 1657         ifp->if_start = msk_start;
 1658         ifp->if_init = msk_init;
 1659         IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
 1660         ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
 1661         IFQ_SET_READY(&ifp->if_snd);
 1662         /*
 1663          * Get station address for this interface. Note that
 1664          * dual port cards actually come with three station
 1665          * addresses: one for each port, plus an extra. The
 1666          * extra one is used by the SysKonnect driver software
 1667          * as a 'virtual' station address for when both ports
 1668          * are operating in failover mode. Currently we don't
 1669          * use this extra address.
 1670          */
 1671         MSK_IF_LOCK(sc_if);
 1672         for (i = 0; i < ETHER_ADDR_LEN; i++)
 1673                 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
 1674 
 1675         /*
 1676          * Call MI attach routine.  Can't hold locks when calling into ether_*.
 1677          */
 1678         MSK_IF_UNLOCK(sc_if);
 1679         ether_ifattach(ifp, eaddr);
 1680         MSK_IF_LOCK(sc_if);
 1681 
 1682         /* VLAN capability setup */
 1683         ifp->if_capabilities |= IFCAP_VLAN_MTU;
 1684         if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
 1685                 /*
 1686                  * Due to Tx checksum offload hardware bugs, msk(4) manually
 1687                  * computes the checksum for short frames.  For VLAN-tagged
 1688                  * frames this workaround does not work, so checksum offload
 1689                  * is left disabled for the VLAN interface.
 1690                  */
 1691                 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
 1692                 /*
 1693                  * Enable Rx checksum offloading for VLAN tagged frames
 1694                  * if the controller supports the new descriptor format.
 1695                  */
 1696                 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
 1697                     (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
 1698                         ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
 1699         }
 1700         ifp->if_capenable = ifp->if_capabilities;
 1701         /*
 1702          * Disable RX checksum offloading on controllers that don't use
 1703          * the new descriptor format, but leave the option to enable it.
 1704          */
 1705         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
 1706                 ifp->if_capenable &= ~IFCAP_RXCSUM;
 1707 
 1708         /*
 1709          * Tell the upper layer(s) we support long frames.
 1710          * Must appear after the call to ether_ifattach() because
 1711          * ether_ifattach() sets ifi_hdrlen to the default value.
 1712          */
 1713         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 1714 
 1715         /*
 1716          * Do miibus setup.
 1717          */
 1718         MSK_IF_UNLOCK(sc_if);
 1719         error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
 1720             msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
 1721             mmd->mii_flags);
 1722         if (error != 0) {
 1723                 device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
 1724                 ether_ifdetach(ifp);
 1725                 error = ENXIO;
 1726                 goto fail;
 1727         }
 1728 
 1729 fail:
 1730         if (error != 0) {
 1731                 /* Access should be ok even though lock has been dropped */
 1732                 sc->msk_if[port] = NULL;
 1733                 msk_detach(dev);
 1734         }
 1735 
 1736         return (error);
 1737 }
 1738 
 1739 /*
 1740  * Attach the controller: allocate the softc, map resources, reset the
 1741  * hardware and add a child msk device for each port.
 1742  */
 1743 static int
 1744 mskc_attach(device_t dev)
 1745 {
 1746         struct msk_softc *sc;
 1747         struct msk_mii_data *mmd;
 1748         int error, msic, msir, reg;
 1749 
 1750         sc = device_get_softc(dev);
 1751         sc->msk_dev = dev;
 1752         mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1753             MTX_DEF);
 1754 
 1755         /*
 1756          * Map control/status registers.
 1757          */
 1758         pci_enable_busmaster(dev);
 1759 
 1760         /* Allocate I/O resource */
 1761 #ifdef MSK_USEIOSPACE
 1762         sc->msk_res_spec = msk_res_spec_io;
 1763 #else
 1764         sc->msk_res_spec = msk_res_spec_mem;
 1765 #endif
 1766         sc->msk_irq_spec = msk_irq_spec_legacy;
 1767         error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
 1768         if (error) {
 1769                 if (sc->msk_res_spec == msk_res_spec_mem)
 1770                         sc->msk_res_spec = msk_res_spec_io;
 1771                 else
 1772                         sc->msk_res_spec = msk_res_spec_mem;
 1773                 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
 1774                 if (error) {
 1775                         device_printf(dev, "couldn't allocate %s resources\n",
 1776                             sc->msk_res_spec == msk_res_spec_mem ? "memory" :
 1777                             "I/O");
 1778                         mtx_destroy(&sc->msk_mtx);
 1779                         return (ENXIO);
 1780                 }
 1781         }
 1782 
 1783         /* Enable all clocks before accessing any registers. */
 1784         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
 1785 
 1786         CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1787         sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
 1788         sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
 1789         /* Bail out if chip is not recognized. */
 1790         if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
 1791             sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
 1792             sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
 1793                 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
 1794                     sc->msk_hw_id, sc->msk_hw_rev);
 1795                 mtx_destroy(&sc->msk_mtx);
 1796                 return (ENXIO);
 1797         }
 1798 
 1799         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 1800             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1801             OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 1802             &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
 1803             "max number of Rx events to process");
 1804 
 1805         sc->msk_process_limit = MSK_PROC_DEFAULT;
 1806         error = resource_int_value(device_get_name(dev), device_get_unit(dev),
 1807             "process_limit", &sc->msk_process_limit);
 1808         if (error == 0) {
 1809                 if (sc->msk_process_limit < MSK_PROC_MIN ||
 1810                     sc->msk_process_limit > MSK_PROC_MAX) {
 1811                         device_printf(dev, "process_limit value out of range; "
 1812                             "using default: %d\n", MSK_PROC_DEFAULT);
 1813                         sc->msk_process_limit = MSK_PROC_DEFAULT;
 1814                 }
 1815         }
 1816 
 1817         sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
 1818         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
 1819             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 1820             "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
 1821             "Maximum time to delay interrupts");
 1822         resource_int_value(device_get_name(dev), device_get_unit(dev),
 1823             "int_holdoff", &sc->msk_int_holdoff);
 1824 
 1825         sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
 1826         /* Check number of MACs. */
 1827         sc->msk_num_port = 1;
 1828         if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
 1829             CFG_DUAL_MAC_MSK) {
 1830                 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
 1831                         sc->msk_num_port++;
 1832         }
 1833 
 1834         /* Check bus type. */
 1835         if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
 1836                 sc->msk_bustype = MSK_PEX_BUS;
 1837                 sc->msk_expcap = reg;
 1838         } else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
 1839                 sc->msk_bustype = MSK_PCIX_BUS;
 1840                 sc->msk_pcixcap = reg;
 1841         } else
 1842                 sc->msk_bustype = MSK_PCI_BUS;
 1843 
 1844         switch (sc->msk_hw_id) {
 1845         case CHIP_ID_YUKON_EC:
 1846                 sc->msk_clock = 125;    /* 125 MHz */
 1847                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1848                 break;
 1849         case CHIP_ID_YUKON_EC_U:
 1850                 sc->msk_clock = 125;    /* 125 MHz */
 1851                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
 1852                 break;
 1853         case CHIP_ID_YUKON_EX:
 1854                 sc->msk_clock = 125;    /* 125 MHz */
 1855                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
 1856                     MSK_FLAG_AUTOTX_CSUM;
 1857                 /*
 1858                  * Yukon Extreme seems to have a silicon bug in its
 1859                  * automatic Tx checksum calculation capability.
 1860                  */
 1861                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
 1862                         sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
 1863                 /*
 1864                  * Yukon Extreme A0 cannot use store-and-forward
 1865                  * for jumbo frames, so disable Tx checksum
 1866                  * offloading for jumbo frames.
 1867                  */
 1868                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
 1869                         sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
 1870                 break;
 1871         case CHIP_ID_YUKON_FE:
 1872                 sc->msk_clock = 100;    /* 100 MHz */
 1873                 sc->msk_pflags |= MSK_FLAG_FASTETHER;
 1874                 break;
 1875         case CHIP_ID_YUKON_FE_P:
 1876                 sc->msk_clock = 50;     /* 50 MHz */
 1877                 sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
 1878                     MSK_FLAG_AUTOTX_CSUM;
 1879                 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
 1880                         /*
 1881                          * XXX
 1882                          * FE+ A0 has a status LE writeback bug, so msk(4)
 1883                          * does not rely on the status word of received frames
 1884                          * in msk_rxeof().  This in turn disables all hardware
 1885                          * assistance bits reported by the status word, as well
 1886                          * as the validity check of the received frame.  Just
 1887                          * pass received frames to the upper stack with minimal
 1888                          * tests and let the upper stack handle them.
 1889                          */
 1890                         sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
 1891                             MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
 1892                 }
 1893                 break;
 1894         case CHIP_ID_YUKON_XL:
 1895                 sc->msk_clock = 156;    /* 156 MHz */
 1896                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1897                 break;
 1898         case CHIP_ID_YUKON_SUPR:
 1899                 sc->msk_clock = 125;    /* 125 MHz */
 1900                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
 1901                     MSK_FLAG_AUTOTX_CSUM;
 1902                 break;
 1903         case CHIP_ID_YUKON_UL_2:
 1904                 sc->msk_clock = 125;    /* 125 MHz */
 1905                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1906                 break;
 1907         case CHIP_ID_YUKON_OPT:
 1908                 sc->msk_clock = 125;    /* 125 MHz */
 1909                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
 1910                 break;
 1911         default:
 1912                 sc->msk_clock = 156;    /* 156 MHz */
 1913                 break;
 1914         }
 1915 
 1916         /* Allocate IRQ resources. */
 1917         msic = pci_msi_count(dev);
 1918         if (bootverbose)
 1919                 device_printf(dev, "MSI count : %d\n", msic);
 1920         if (legacy_intr != 0)
 1921                 msi_disable = 1;
 1922         if (msi_disable == 0 && msic > 0) {
 1923                 msir = 1;
 1924                 if (pci_alloc_msi(dev, &msir) == 0) {
 1925                         if (msir == 1) {
 1926                                 sc->msk_pflags |= MSK_FLAG_MSI;
 1927                                 sc->msk_irq_spec = msk_irq_spec_msi;
 1928                         } else
 1929                                 pci_release_msi(dev);
 1930                 }
 1931         }
 1932 
 1933         error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
 1934         if (error) {
 1935                 device_printf(dev, "couldn't allocate IRQ resources\n");
 1936                 goto fail;
 1937         }
 1938 
 1939         if ((error = msk_status_dma_alloc(sc)) != 0)
 1940                 goto fail;
 1941 
 1942         /* Set base interrupt mask. */
 1943         sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
 1944         sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
 1945             Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
 1946 
 1947         /* Reset the adapter. */
 1948         mskc_reset(sc);
 1949 
 1950         if ((error = mskc_setup_rambuffer(sc)) != 0)
 1951                 goto fail;
 1952 
 1953         sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
 1954         if (sc->msk_devs[MSK_PORT_A] == NULL) {
 1955                 device_printf(dev, "failed to add child for PORT_A\n");
 1956                 error = ENXIO;
 1957                 goto fail;
 1958         }
 1959         mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
 1960         if (mmd == NULL) {
 1961                 device_printf(dev, "failed to allocate memory for "
 1962                     "ivars of PORT_A\n");
 1963                 error = ENXIO;
 1964                 goto fail;
 1965         }
 1966         mmd->port = MSK_PORT_A;
 1967         mmd->pmd = sc->msk_pmd;
 1968         mmd->mii_flags |= MIIF_DOPAUSE;
 1969         if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
 1970                 mmd->mii_flags |= MIIF_HAVEFIBER;
 1971         if (sc->msk_pmd == 'P')
 1972                 mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
 1973         device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);
 1974 
 1975         if (sc->msk_num_port > 1) {
 1976                 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
 1977                 if (sc->msk_devs[MSK_PORT_B] == NULL) {
 1978                         device_printf(dev, "failed to add child for PORT_B\n");
 1979                         error = ENXIO;
 1980                         goto fail;
 1981                 }
 1982                 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
 1983                     M_ZERO);
 1984                 if (mmd == NULL) {
 1985                         device_printf(dev, "failed to allocate memory for "
 1986                             "ivars of PORT_B\n");
 1987                         error = ENXIO;
 1988                         goto fail;
 1989                 }
 1990                 mmd->port = MSK_PORT_B;
 1991                 mmd->pmd = sc->msk_pmd;
 1992                 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
 1993                         mmd->mii_flags |= MIIF_HAVEFIBER;
 1994                 if (sc->msk_pmd == 'P')
 1995                         mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
 1996                 device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
 1997         }
 1998 
 1999         error = bus_generic_attach(dev);
 2000         if (error) {
 2001                 device_printf(dev, "failed to attach port(s)\n");
 2002                 goto fail;
 2003         }
 2004 
 2005         /* Hook interrupt last to avoid having to lock softc. */
 2006         error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
 2007             INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
 2008         if (error != 0) {
 2009                 device_printf(dev, "couldn't set up interrupt handler\n");
 2010                 goto fail;
 2011         }
 2012 fail:
 2013         if (error != 0)
 2014                 mskc_detach(dev);
 2015 
 2016         return (error);
 2017 }
 2018 
 2019 /*
 2020  * Shutdown hardware and free up resources. This can be called any
 2021  * time after the mutex has been initialized. It is called in both
 2022  * the error case in attach and the normal detach case so it needs
 2023  * to be careful about only freeing resources that have actually been
 2024  * allocated.
 2025  */
 2026 static int
 2027 msk_detach(device_t dev)
 2028 {
 2029         struct msk_softc *sc;
 2030         struct msk_if_softc *sc_if;
 2031         struct ifnet *ifp;
 2032 
 2033         sc_if = device_get_softc(dev);
 2034         KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
 2035             ("msk mutex not initialized in msk_detach"));
 2036         MSK_IF_LOCK(sc_if);
 2037 
 2038         ifp = sc_if->msk_ifp;
 2039         if (device_is_attached(dev)) {
 2040                 /* XXX */
 2041                 sc_if->msk_flags |= MSK_FLAG_DETACH;
 2042                 msk_stop(sc_if);
 2043                 /* Can't hold locks while calling detach. */
 2044                 MSK_IF_UNLOCK(sc_if);
 2045                 callout_drain(&sc_if->msk_tick_ch);
 2046                 if (ifp)
 2047                         ether_ifdetach(ifp);
 2048                 MSK_IF_LOCK(sc_if);
 2049         }
 2050 
 2051         /*
 2052          * We're generally called from mskc_detach() which is using
 2053          * device_delete_child() to get to here. It's already trashed
 2054          * miibus for us, so don't do it here or we'll panic.
 2055          *
 2056          * if (sc_if->msk_miibus != NULL) {
 2057          *      device_delete_child(dev, sc_if->msk_miibus);
 2058          *      sc_if->msk_miibus = NULL;
 2059          * }
 2060          */
 2061 
 2062         msk_rx_dma_jfree(sc_if);
 2063         msk_txrx_dma_free(sc_if);
 2064         bus_generic_detach(dev);
 2065 
 2066         if (ifp)
 2067                 if_free(ifp);
 2068         sc = sc_if->msk_softc;
 2069         sc->msk_if[sc_if->msk_port] = NULL;
 2070         MSK_IF_UNLOCK(sc_if);
 2071 
 2072         return (0);
 2073 }
 2074 
 2075 static int
 2076 mskc_detach(device_t dev)
 2077 {
 2078         struct msk_softc *sc;
 2079 
 2080         sc = device_get_softc(dev);
 2081         KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
 2082 
 2083         if (device_is_alive(dev)) {
 2084                 if (sc->msk_devs[MSK_PORT_A] != NULL) {
 2085                         free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
 2086                             M_DEVBUF);
 2087                         device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
 2088                 }
 2089                 if (sc->msk_devs[MSK_PORT_B] != NULL) {
 2090                         free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
 2091                             M_DEVBUF);
 2092                         device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
 2093                 }
 2094                 bus_generic_detach(dev);
 2095         }
 2096 
 2097         /* Disable all interrupts. */
 2098         CSR_WRITE_4(sc, B0_IMSK, 0);
 2099         CSR_READ_4(sc, B0_IMSK);
 2100         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 2101         CSR_READ_4(sc, B0_HWE_IMSK);
 2102 
 2103         /* LED Off. */
 2104         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
 2105 
 2106         /* Put the hardware into reset. */
 2107         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 2108 
 2109         msk_status_dma_free(sc);
 2110 
 2111         if (sc->msk_intrhand) {
 2112                 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
 2113                 sc->msk_intrhand = NULL;
 2114         }
 2115         bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
 2116         if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
 2117                 pci_release_msi(dev);
 2118         bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
 2119         mtx_destroy(&sc->msk_mtx);
 2120 
 2121         return (0);
 2122 }
 2123 
 2124 static bus_dma_tag_t
 2125 mskc_get_dma_tag(device_t bus, device_t child __unused)
 2126 {
 2127 
 2128         return (bus_get_dma_tag(bus));
 2129 }
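
/*
 * mskc_get_dma_tag() is the controller's bus_get_dma_tag method: when a
 * child msk(4) port calls bus_get_dma_tag(sc_if->msk_if_dev), newbus
 * resolves it through this routine, so the per-port DMA tags created in
 * msk_txrx_dma_alloc() are parented to the controller's (and ultimately
 * the PCI bus's) tag.
 */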
 2130 
 2131 struct msk_dmamap_arg {
 2132         bus_addr_t      msk_busaddr;
 2133 };
 2134 
 2135 static void
 2136 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 2137 {
 2138         struct msk_dmamap_arg *ctx;
 2139 
 2140         if (error != 0)
 2141                 return;
 2142         ctx = arg;
 2143         ctx->msk_busaddr = segs[0].ds_addr;
 2144 }
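
/*
 * msk_dmamap_cb() follows the usual bus_dmamap_load() callback pattern:
 * every tag loaded with it is created with nsegments == 1, so on success
 * segs[0].ds_addr is the bus address of the whole region and is copied
 * into the caller's msk_dmamap_arg.  On error the context is left
 * untouched and the caller checks the bus_dmamap_load() return value.
 */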
 2145 
 2146 /* Create status DMA region. */
 2147 static int
 2148 msk_status_dma_alloc(struct msk_softc *sc)
 2149 {
 2150         struct msk_dmamap_arg ctx;
 2151         bus_size_t stat_sz;
 2152         int count, error;
 2153 
 2154         /*
 2155          * The controller seems to require that the number of status LE
 2156          * entries be a power of 2, with a maximum of 4096 entries.
 2157          * For dual-port controllers, the number of status LE entries
 2158          * should be large enough to hold the status updates of both
 2159          * ports.
 2160          */
 2161         count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT;
 2162         count = imin(4096, roundup2(count, 1024));
 2163         sc->msk_stat_count = count;
 2164         stat_sz = count * sizeof(struct msk_stat_desc);
 2165         error = bus_dma_tag_create(
 2166                     bus_get_dma_tag(sc->msk_dev),       /* parent */
 2167                     MSK_STAT_ALIGN, 0,          /* alignment, boundary */
 2168                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2169                     BUS_SPACE_MAXADDR,          /* highaddr */
 2170                     NULL, NULL,                 /* filter, filterarg */
 2171                     stat_sz,                    /* maxsize */
 2172                     1,                          /* nsegments */
 2173                     stat_sz,                    /* maxsegsize */
 2174                     0,                          /* flags */
 2175                     NULL, NULL,                 /* lockfunc, lockarg */
 2176                     &sc->msk_stat_tag);
 2177         if (error != 0) {
 2178                 device_printf(sc->msk_dev,
 2179                     "failed to create status DMA tag\n");
 2180                 return (error);
 2181         }
 2182 
 2183         /* Allocate DMA'able memory and load the DMA map for status ring. */
 2184         error = bus_dmamem_alloc(sc->msk_stat_tag,
 2185             (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
 2186             BUS_DMA_ZERO, &sc->msk_stat_map);
 2187         if (error != 0) {
 2188                 device_printf(sc->msk_dev,
 2189                     "failed to allocate DMA'able memory for status ring\n");
 2190                 return (error);
 2191         }
 2192 
 2193         ctx.msk_busaddr = 0;
 2194         error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
 2195             sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2196         if (error != 0) {
 2197                 device_printf(sc->msk_dev,
 2198                     "failed to load DMA'able memory for status ring\n");
 2199                 return (error);
 2200         }
 2201         sc->msk_stat_ring_paddr = ctx.msk_busaddr;
 2202 
 2203         return (0);
 2204 }
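
/*
 * For example (a sketch only, assuming hypothetical ring sizes
 * MSK_RX_RING_CNT == 256 and MSK_TX_RING_CNT == 384):
 *
 *     count = 3 * 256 + 384                    = 1152
 *     count = imin(4096, roundup2(1152, 1024)) = 2048
 *
 * so the status list would hold 2048 status LEs.
 */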
 2205 
 2206 static void
 2207 msk_status_dma_free(struct msk_softc *sc)
 2208 {
 2209 
 2210         /* Destroy status block. */
 2211         if (sc->msk_stat_tag) {
 2212                 if (sc->msk_stat_map) {
 2213                         bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
 2214                         if (sc->msk_stat_ring) {
 2215                                 bus_dmamem_free(sc->msk_stat_tag,
 2216                                     sc->msk_stat_ring, sc->msk_stat_map);
 2217                                 sc->msk_stat_ring = NULL;
 2218                         }
 2219                         sc->msk_stat_map = NULL;
 2220                 }
 2221                 bus_dma_tag_destroy(sc->msk_stat_tag);
 2222                 sc->msk_stat_tag = NULL;
 2223         }
 2224 }
 2225 
 2226 static int
 2227 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
 2228 {
 2229         struct msk_dmamap_arg ctx;
 2230         struct msk_txdesc *txd;
 2231         struct msk_rxdesc *rxd;
 2232         bus_size_t rxalign;
 2233         int error, i;
 2234 
 2235         /* Create parent DMA tag. */
 2236         error = bus_dma_tag_create(
 2237                     bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
 2238                     1, 0,                       /* alignment, boundary */
 2239                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2240                     BUS_SPACE_MAXADDR,          /* highaddr */
 2241                     NULL, NULL,                 /* filter, filterarg */
 2242                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 2243                     0,                          /* nsegments */
 2244                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 2245                     0,                          /* flags */
 2246                     NULL, NULL,                 /* lockfunc, lockarg */
 2247                     &sc_if->msk_cdata.msk_parent_tag);
 2248         if (error != 0) {
 2249                 device_printf(sc_if->msk_if_dev,
 2250                     "failed to create parent DMA tag\n");
 2251                 goto fail;
 2252         }
 2253         /* Create tag for Tx ring. */
 2254         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2255                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2256                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2257                     BUS_SPACE_MAXADDR,          /* highaddr */
 2258                     NULL, NULL,                 /* filter, filterarg */
 2259                     MSK_TX_RING_SZ,             /* maxsize */
 2260                     1,                          /* nsegments */
 2261                     MSK_TX_RING_SZ,             /* maxsegsize */
 2262                     0,                          /* flags */
 2263                     NULL, NULL,                 /* lockfunc, lockarg */
 2264                     &sc_if->msk_cdata.msk_tx_ring_tag);
 2265         if (error != 0) {
 2266                 device_printf(sc_if->msk_if_dev,
 2267                     "failed to create Tx ring DMA tag\n");
 2268                 goto fail;
 2269         }
 2270 
 2271         /* Create tag for Rx ring. */
 2272         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2273                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2274                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2275                     BUS_SPACE_MAXADDR,          /* highaddr */
 2276                     NULL, NULL,                 /* filter, filterarg */
 2277                     MSK_RX_RING_SZ,             /* maxsize */
 2278                     1,                          /* nsegments */
 2279                     MSK_RX_RING_SZ,             /* maxsegsize */
 2280                     0,                          /* flags */
 2281                     NULL, NULL,                 /* lockfunc, lockarg */
 2282                     &sc_if->msk_cdata.msk_rx_ring_tag);
 2283         if (error != 0) {
 2284                 device_printf(sc_if->msk_if_dev,
 2285                     "failed to create Rx ring DMA tag\n");
 2286                 goto fail;
 2287         }
 2288 
 2289         /* Create tag for Tx buffers. */
 2290         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2291                     1, 0,                       /* alignment, boundary */
 2292                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2293                     BUS_SPACE_MAXADDR,          /* highaddr */
 2294                     NULL, NULL,                 /* filter, filterarg */
 2295                     MSK_TSO_MAXSIZE,            /* maxsize */
 2296                     MSK_MAXTXSEGS,              /* nsegments */
 2297                     MSK_TSO_MAXSGSIZE,          /* maxsegsize */
 2298                     0,                          /* flags */
 2299                     NULL, NULL,                 /* lockfunc, lockarg */
 2300                     &sc_if->msk_cdata.msk_tx_tag);
 2301         if (error != 0) {
 2302                 device_printf(sc_if->msk_if_dev,
 2303                     "failed to create Tx DMA tag\n");
 2304                 goto fail;
 2305         }
 2306 
 2307         rxalign = 1;
 2308         /*
 2309          * Work around a hardware hang which seems to happen when the Rx
 2310          * buffer is not aligned on a multiple of the FIFO word (8 bytes).
 2311          */
 2312         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 2313                 rxalign = MSK_RX_BUF_ALIGN;
 2314         /* Create tag for Rx buffers. */
 2315         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2316                     rxalign, 0,                 /* alignment, boundary */
 2317                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2318                     BUS_SPACE_MAXADDR,          /* highaddr */
 2319                     NULL, NULL,                 /* filter, filterarg */
 2320                     MCLBYTES,                   /* maxsize */
 2321                     1,                          /* nsegments */
 2322                     MCLBYTES,                   /* maxsegsize */
 2323                     0,                          /* flags */
 2324                     NULL, NULL,                 /* lockfunc, lockarg */
 2325                     &sc_if->msk_cdata.msk_rx_tag);
 2326         if (error != 0) {
 2327                 device_printf(sc_if->msk_if_dev,
 2328                     "failed to create Rx DMA tag\n");
 2329                 goto fail;
 2330         }
 2331 
 2332         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
 2333         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
 2334             (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
 2335             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
 2336         if (error != 0) {
 2337                 device_printf(sc_if->msk_if_dev,
 2338                     "failed to allocate DMA'able memory for Tx ring\n");
 2339                 goto fail;
 2340         }
 2341 
 2342         ctx.msk_busaddr = 0;
 2343         error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
 2344             sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
 2345             MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2346         if (error != 0) {
 2347                 device_printf(sc_if->msk_if_dev,
 2348                     "failed to load DMA'able memory for Tx ring\n");
 2349                 goto fail;
 2350         }
 2351         sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
 2352 
 2353         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
 2354         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
 2355             (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
 2356             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
 2357         if (error != 0) {
 2358                 device_printf(sc_if->msk_if_dev,
 2359                     "failed to allocate DMA'able memory for Rx ring\n");
 2360                 goto fail;
 2361         }
 2362 
 2363         ctx.msk_busaddr = 0;
 2364         error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
 2365             sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
 2366             MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2367         if (error != 0) {
 2368                 device_printf(sc_if->msk_if_dev,
 2369                     "failed to load DMA'able memory for Rx ring\n");
 2370                 goto fail;
 2371         }
 2372         sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
 2373 
 2374         /* Create DMA maps for Tx buffers. */
 2375         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2376                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 2377                 txd->tx_m = NULL;
 2378                 txd->tx_dmamap = NULL;
 2379                 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
 2380                     &txd->tx_dmamap);
 2381                 if (error != 0) {
 2382                         device_printf(sc_if->msk_if_dev,
 2383                             "failed to create Tx dmamap\n");
 2384                         goto fail;
 2385                 }
 2386         }
 2387         /* Create DMA maps for Rx buffers. */
 2388         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
 2389             &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
 2390                 device_printf(sc_if->msk_if_dev,
 2391                     "failed to create spare Rx dmamap\n");
 2392                 goto fail;
 2393         }
 2394         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2395                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2396                 rxd->rx_m = NULL;
 2397                 rxd->rx_dmamap = NULL;
 2398                 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
 2399                     &rxd->rx_dmamap);
 2400                 if (error != 0) {
 2401                         device_printf(sc_if->msk_if_dev,
 2402                             "failed to create Rx dmamap\n");
 2403                         goto fail;
 2404                 }
 2405         }
 2406 
 2407 fail:
 2408         return (error);
 2409 }
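
/*
 * To summarize the tag hierarchy built above: one parent tag per port,
 * under which hang the Tx/Rx ring tags (MSK_RING_ALIGN aligned, single
 * segment) and the per-buffer tags.  Tx buffers may scatter across up to
 * MSK_MAXTXSEGS segments for TSO, while Rx buffers are single mbuf
 * clusters, aligned to MSK_RX_BUF_ALIGN when the RAM buffer workaround
 * is in effect.
 */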
 2410 
 2411 static int
 2412 msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
 2413 {
 2414         struct msk_dmamap_arg ctx;
 2415         struct msk_rxdesc *jrxd;
 2416         bus_size_t rxalign;
 2417         int error, i;
 2418 
 2419         if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
 2420                 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
 2421                 device_printf(sc_if->msk_if_dev,
 2422                     "disabling jumbo frame support\n");
 2423                 return (0);
 2424         }
 2425         /* Create tag for jumbo Rx ring. */
 2426         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2427                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2428                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2429                     BUS_SPACE_MAXADDR,          /* highaddr */
 2430                     NULL, NULL,                 /* filter, filterarg */
 2431                     MSK_JUMBO_RX_RING_SZ,       /* maxsize */
 2432                     1,                          /* nsegments */
 2433                     MSK_JUMBO_RX_RING_SZ,       /* maxsegsize */
 2434                     0,                          /* flags */
 2435                     NULL, NULL,                 /* lockfunc, lockarg */
 2436                     &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2437         if (error != 0) {
 2438                 device_printf(sc_if->msk_if_dev,
 2439                     "failed to create jumbo Rx ring DMA tag\n");
 2440                 goto jumbo_fail;
 2441         }
 2442 
 2443         rxalign = 1;
 2444         /*
 2445          * Work around a hardware hang which seems to happen when the Rx
 2446          * buffer is not aligned on a multiple of the FIFO word (8 bytes).
 2447          */
 2448         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 2449                 rxalign = MSK_RX_BUF_ALIGN;
 2450         /* Create tag for jumbo Rx buffers. */
 2451         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2452                     rxalign, 0,                 /* alignment, boundary */
 2453                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2454                     BUS_SPACE_MAXADDR,          /* highaddr */
 2455                     NULL, NULL,                 /* filter, filterarg */
 2456                     MJUM9BYTES,                 /* maxsize */
 2457                     1,                          /* nsegments */
 2458                     MJUM9BYTES,                 /* maxsegsize */
 2459                     0,                          /* flags */
 2460                     NULL, NULL,                 /* lockfunc, lockarg */
 2461                     &sc_if->msk_cdata.msk_jumbo_rx_tag);
 2462         if (error != 0) {
 2463                 device_printf(sc_if->msk_if_dev,
 2464                     "failed to create jumbo Rx DMA tag\n");
 2465                 goto jumbo_fail;
 2466         }
 2467 
 2468         /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
 2469         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2470             (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
 2471             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
 2472             &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2473         if (error != 0) {
 2474                 device_printf(sc_if->msk_if_dev,
 2475                     "failed to allocate DMA'able memory for jumbo Rx ring\n");
 2476                 goto jumbo_fail;
 2477         }
 2478 
 2479         ctx.msk_busaddr = 0;
 2480         error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2481             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 2482             sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
 2483             msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2484         if (error != 0) {
 2485                 device_printf(sc_if->msk_if_dev,
 2486                     "failed to load DMA'able memory for jumbo Rx ring\n");
 2487                 goto jumbo_fail;
 2488         }
 2489         sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
 2490 
 2491         /* Create DMA maps for jumbo Rx buffers. */
 2492         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2493             &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
 2494                 device_printf(sc_if->msk_if_dev,
 2495                     "failed to create spare jumbo Rx dmamap\n");
 2496                 goto jumbo_fail;
 2497         }
 2498         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2499                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2500                 jrxd->rx_m = NULL;
 2501                 jrxd->rx_dmamap = NULL;
 2502                 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2503                     &jrxd->rx_dmamap);
 2504                 if (error != 0) {
 2505                         device_printf(sc_if->msk_if_dev,
 2506                             "failed to create jumbo Rx dmamap\n");
 2507                         goto jumbo_fail;
 2508                 }
 2509         }
 2510 
 2511         return (0);
 2512 
 2513 jumbo_fail:
 2514         msk_rx_dma_jfree(sc_if);
 2515         device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
 2516             "due to resource shortage\n");
 2517         sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
 2518         return (error);
 2519 }
 2520 
 2521 static void
 2522 msk_txrx_dma_free(struct msk_if_softc *sc_if)
 2523 {
 2524         struct msk_txdesc *txd;
 2525         struct msk_rxdesc *rxd;
 2526         int i;
 2527 
 2528         /* Tx ring. */
 2529         if (sc_if->msk_cdata.msk_tx_ring_tag) {
 2530                 if (sc_if->msk_cdata.msk_tx_ring_map)
 2531                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
 2532                             sc_if->msk_cdata.msk_tx_ring_map);
 2533                 if (sc_if->msk_cdata.msk_tx_ring_map &&
 2534                     sc_if->msk_rdata.msk_tx_ring)
 2535                         bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
 2536                             sc_if->msk_rdata.msk_tx_ring,
 2537                             sc_if->msk_cdata.msk_tx_ring_map);
 2538                 sc_if->msk_rdata.msk_tx_ring = NULL;
 2539                 sc_if->msk_cdata.msk_tx_ring_map = NULL;
 2540                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
 2541                 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
 2542         }
 2543         /* Rx ring. */
 2544         if (sc_if->msk_cdata.msk_rx_ring_tag) {
 2545                 if (sc_if->msk_cdata.msk_rx_ring_map)
 2546                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
 2547                             sc_if->msk_cdata.msk_rx_ring_map);
 2548                 if (sc_if->msk_cdata.msk_rx_ring_map &&
 2549                     sc_if->msk_rdata.msk_rx_ring)
 2550                         bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
 2551                             sc_if->msk_rdata.msk_rx_ring,
 2552                             sc_if->msk_cdata.msk_rx_ring_map);
 2553                 sc_if->msk_rdata.msk_rx_ring = NULL;
 2554                 sc_if->msk_cdata.msk_rx_ring_map = NULL;
 2555                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
 2556                 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
 2557         }
 2558         /* Tx buffers. */
 2559         if (sc_if->msk_cdata.msk_tx_tag) {
 2560                 for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2561                         txd = &sc_if->msk_cdata.msk_txdesc[i];
 2562                         if (txd->tx_dmamap) {
 2563                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
 2564                                     txd->tx_dmamap);
 2565                                 txd->tx_dmamap = NULL;
 2566                         }
 2567                 }
 2568                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
 2569                 sc_if->msk_cdata.msk_tx_tag = NULL;
 2570         }
 2571         /* Rx buffers. */
 2572         if (sc_if->msk_cdata.msk_rx_tag) {
 2573                 for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2574                         rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2575                         if (rxd->rx_dmamap) {
 2576                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2577                                     rxd->rx_dmamap);
 2578                                 rxd->rx_dmamap = NULL;
 2579                         }
 2580                 }
 2581                 if (sc_if->msk_cdata.msk_rx_sparemap) {
 2582                         bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2583                             sc_if->msk_cdata.msk_rx_sparemap);
 2584                         sc_if->msk_cdata.msk_rx_sparemap = 0;
 2585                 }
 2586                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
 2587                 sc_if->msk_cdata.msk_rx_tag = NULL;
 2588         }
 2589         if (sc_if->msk_cdata.msk_parent_tag) {
 2590                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
 2591                 sc_if->msk_cdata.msk_parent_tag = NULL;
 2592         }
 2593 }
 2594 
 2595 static void
 2596 msk_rx_dma_jfree(struct msk_if_softc *sc_if)
 2597 {
 2598         struct msk_rxdesc *jrxd;
 2599         int i;
 2600 
 2601         /* Jumbo Rx ring. */
 2602         if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
 2603                 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
 2604                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2605                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2606                 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
 2607                     sc_if->msk_rdata.msk_jumbo_rx_ring)
 2608                         bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2609                             sc_if->msk_rdata.msk_jumbo_rx_ring,
 2610                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2611                 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
 2612                 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
 2613                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2614                 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
 2615         }
 2616         /* Jumbo Rx buffers. */
 2617         if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
 2618                 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2619                         jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2620                         if (jrxd->rx_dmamap) {
 2621                                 bus_dmamap_destroy(
 2622                                     sc_if->msk_cdata.msk_jumbo_rx_tag,
 2623                                     jrxd->rx_dmamap);
 2624                                 jrxd->rx_dmamap = NULL;
 2625                         }
 2626                 }
 2627                 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
 2628                         bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
 2629                             sc_if->msk_cdata.msk_jumbo_rx_sparemap);
 2630                         sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
 2631                 }
 2632                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
 2633                 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
 2634         }
 2635 }
 2636 
 2637 static int
 2638 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
 2639 {
 2640         struct msk_txdesc *txd, *txd_last;
 2641         struct msk_tx_desc *tx_le;
 2642         struct mbuf *m;
 2643         bus_dmamap_t map;
 2644         bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
 2645         uint32_t control, csum, prod, si;
 2646         uint16_t offset, tcp_offset, tso_mtu;
 2647         int error, i, nseg, tso;
 2648 
 2649         MSK_IF_LOCK_ASSERT(sc_if);
 2650 
 2651         tcp_offset = offset = 0;
 2652         m = *m_head;
 2653         if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
 2654             (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
 2655             ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 2656             (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
 2657                 /*
 2658                  * Since the mbuf carries no protocol-specific structure
 2659                  * information, we have to inspect the protocol headers here
 2660                  * to set up TSO and checksum offload. It is unclear why
 2661                  * Marvell made such a design decision, since other GigE
 2662                  * controllers normally take care of these chores in
 2663                  * hardware. However, TSO performance of the Yukon II is
 2664                  * good enough to make it worth implementing.
 2665                  */
 2666                 struct ether_header *eh;
 2667                 struct ip *ip;
 2668                 struct tcphdr *tcp;
 2669 
 2670                 if (M_WRITABLE(m) == 0) {
 2671                         /* Get a writable copy. */
 2672                         m = m_dup(*m_head, M_NOWAIT);
 2673                         m_freem(*m_head);
 2674                         if (m == NULL) {
 2675                                 *m_head = NULL;
 2676                                 return (ENOBUFS);
 2677                         }
 2678                         *m_head = m;
 2679                 }
 2680 
 2681                 offset = sizeof(struct ether_header);
 2682                 m = m_pullup(m, offset);
 2683                 if (m == NULL) {
 2684                         *m_head = NULL;
 2685                         return (ENOBUFS);
 2686                 }
 2687                 eh = mtod(m, struct ether_header *);
 2688                 /* Check if hardware VLAN insertion is off. */
 2689                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 2690                         offset = sizeof(struct ether_vlan_header);
 2691                         m = m_pullup(m, offset);
 2692                         if (m == NULL) {
 2693                                 *m_head = NULL;
 2694                                 return (ENOBUFS);
 2695                         }
 2696                 }
 2697                 m = m_pullup(m, offset + sizeof(struct ip));
 2698                 if (m == NULL) {
 2699                         *m_head = NULL;
 2700                         return (ENOBUFS);
 2701                 }
 2702                 ip = (struct ip *)(mtod(m, char *) + offset);
 2703                 offset += (ip->ip_hl << 2);
 2704                 tcp_offset = offset;
 2705                 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2706                         m = m_pullup(m, offset + sizeof(struct tcphdr));
 2707                         if (m == NULL) {
 2708                                 *m_head = NULL;
 2709                                 return (ENOBUFS);
 2710                         }
 2711                         tcp = (struct tcphdr *)(mtod(m, char *) + offset);
 2712                         offset += (tcp->th_off << 2);
 2713                 } else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
 2714                     (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
 2715                     (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
 2716                         /*
 2717                          * The Yukon II seems to have a Tx checksum offload
 2718                          * bug for small TCP packets shorter than 60 bytes
 2719                          * (e.g. a TCP window probe or a pure ACK packet).
 2720                          * The common workaround of padding the frame with
 2721                          * zeros up to the minimum Ethernet frame size did
 2722                          * not work at all.
 2723                          * Instead of disabling checksum offload completely,
 2724                          * we fall back to a software checksum routine when
 2725                          * we encounter short TCP frames.
 2726                          * Short UDP packets appear to be handled correctly
 2727                          * by the Yukon II. This bug is also assumed not to
 2728                          * occur on controllers that use the newer descriptor
 2729                          * format or automatic Tx checksum calculation.
 2730                          */
 2731                         m = m_pullup(m, offset + sizeof(struct tcphdr));
 2732                         if (m == NULL) {
 2733                                 *m_head = NULL;
 2734                                 return (ENOBUFS);
 2735                         }
 2736                         *(uint16_t *)(m->m_data + offset +
 2737                             m->m_pkthdr.csum_data) = in_cksum_skip(m,
 2738                             m->m_pkthdr.len, offset);
 2739                         m->m_pkthdr.csum_flags &= ~CSUM_TCP;
 2740                 }
 2741                 *m_head = m;
 2742         }
 2743 
 2744         prod = sc_if->msk_cdata.msk_tx_prod;
 2745         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2746         txd_last = txd;
 2747         map = txd->tx_dmamap;
 2748         error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
 2749             *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
 2750         if (error == EFBIG) {
 2751                 m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS);
 2752                 if (m == NULL) {
 2753                         m_freem(*m_head);
 2754                         *m_head = NULL;
 2755                         return (ENOBUFS);
 2756                 }
 2757                 *m_head = m;
 2758                 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
 2759                     map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
 2760                 if (error != 0) {
 2761                         m_freem(*m_head);
 2762                         *m_head = NULL;
 2763                         return (error);
 2764                 }
 2765         } else if (error != 0)
 2766                 return (error);
 2767         if (nseg == 0) {
 2768                 m_freem(*m_head);
 2769                 *m_head = NULL;
 2770                 return (EIO);
 2771         }
 2772 
 2773         /* Check number of available descriptors. */
 2774         if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
 2775             (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
 2776                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
 2777                 return (ENOBUFS);
 2778         }
 2779 
 2780         control = 0;
 2781         tso = 0;
 2782         tx_le = NULL;
 2783 
 2784         /* Check TSO support. */
 2785         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2786                 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
 2787                         tso_mtu = m->m_pkthdr.tso_segsz;
 2788                 else
 2789                         tso_mtu = offset + m->m_pkthdr.tso_segsz;
 2790                 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
 2791                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2792                         tx_le->msk_addr = htole32(tso_mtu);
 2793                         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
 2794                                 tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
 2795                         else
 2796                                 tx_le->msk_control =
 2797                                     htole32(OP_LRGLEN | HW_OWNER);
 2798                         sc_if->msk_cdata.msk_tx_cnt++;
 2799                         MSK_INC(prod, MSK_TX_RING_CNT);
 2800                         sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
 2801                 }
 2802                 tso++;
 2803         }
 2804         /* Check if we have a VLAN tag to insert. */
 2805         if ((m->m_flags & M_VLANTAG) != 0) {
 2806                 if (tx_le == NULL) {
 2807                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2808                         tx_le->msk_addr = htole32(0);
 2809                         tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
 2810                             htons(m->m_pkthdr.ether_vtag));
 2811                         sc_if->msk_cdata.msk_tx_cnt++;
 2812                         MSK_INC(prod, MSK_TX_RING_CNT);
 2813                 } else {
 2814                         tx_le->msk_control |= htole32(OP_VLAN |
 2815                             htons(m->m_pkthdr.ether_vtag));
 2816                 }
 2817                 control |= INS_VLAN;
 2818         }
 2819         /* Check if we have to handle checksum offload. */
 2820         if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
 2821                 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
 2822                         control |= CALSUM;
 2823                 else {
 2824                         control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
 2825                         if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 2826                                 control |= UDPTCP;
 2827                         /* Checksum write position. */
 2828                         csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
 2829                         /* Checksum start position. */
 2830                         csum |= (uint32_t)tcp_offset << 16;
 2831                         if (csum != sc_if->msk_cdata.msk_last_csum) {
 2832                                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2833                                 tx_le->msk_addr = htole32(csum);
 2834                                 tx_le->msk_control = htole32(1 << 16 |
 2835                                     (OP_TCPLISW | HW_OWNER));
 2836                                 sc_if->msk_cdata.msk_tx_cnt++;
 2837                                 MSK_INC(prod, MSK_TX_RING_CNT);
 2838                                 sc_if->msk_cdata.msk_last_csum = csum;
 2839                         }
 2840                 }
 2841         }
 2842 
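              /*
               * With 64-bit DMA the chip takes the upper 32 bits of a buffer
               * address from a separate OP_ADDR64 LE, so emit one whenever
               * the high part of the segment address changes and cache the
               * last value written.
               */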
 2843 #ifdef MSK_64BIT_DMA
 2844         if (MSK_ADDR_HI(txsegs[0].ds_addr) !=
 2845             sc_if->msk_cdata.msk_tx_high_addr) {
 2846                 sc_if->msk_cdata.msk_tx_high_addr =
 2847                     MSK_ADDR_HI(txsegs[0].ds_addr);
 2848                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2849                 tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr));
 2850                 tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
 2851                 sc_if->msk_cdata.msk_tx_cnt++;
 2852                 MSK_INC(prod, MSK_TX_RING_CNT);
 2853         }
 2854 #endif
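              /*
               * Remember the index of the first data LE; its HW_OWNER bit is
               * set last, after the whole chain has been built, so the chip
               * does not start on a partially constructed frame.
               */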
 2855         si = prod;
 2856         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2857         tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
 2858         if (tso == 0)
 2859                 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2860                     OP_PACKET);
 2861         else
 2862                 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2863                     OP_LARGESEND);
 2864         sc_if->msk_cdata.msk_tx_cnt++;
 2865         MSK_INC(prod, MSK_TX_RING_CNT);
 2866 
 2867         for (i = 1; i < nseg; i++) {
 2868                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2869 #ifdef MSK_64BIT_DMA
 2870                 if (MSK_ADDR_HI(txsegs[i].ds_addr) !=
 2871                     sc_if->msk_cdata.msk_tx_high_addr) {
 2872                         sc_if->msk_cdata.msk_tx_high_addr =
 2873                             MSK_ADDR_HI(txsegs[i].ds_addr);
 2874                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2875                         tx_le->msk_addr =
 2876                             htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
 2877                         tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
 2878                         sc_if->msk_cdata.msk_tx_cnt++;
 2879                         MSK_INC(prod, MSK_TX_RING_CNT);
 2880                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2881                 }
 2882 #endif
 2883                 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
 2884                 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
 2885                     OP_BUFFER | HW_OWNER);
 2886                 sc_if->msk_cdata.msk_tx_cnt++;
 2887                 MSK_INC(prod, MSK_TX_RING_CNT);
 2888         }
 2889         /* Update producer index. */
 2890         sc_if->msk_cdata.msk_tx_prod = prod;
 2891 
 2892         /* Set EOP on the last descriptor. */
 2893         prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
 2894         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2895         tx_le->msk_control |= htole32(EOP);
 2896 
 2897         /* Turn the first descriptor ownership to hardware. */
 2898         tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
 2899         tx_le->msk_control |= htole32(HW_OWNER);
 2900 
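              /*
               * Store the mbuf in the last (EOP) descriptor and swap DMA maps
               * so that the map holding this mapping is the one unloaded when
               * msk_txeof() processes the EOP descriptor.
               */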
 2901         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2902         map = txd_last->tx_dmamap;
 2903         txd_last->tx_dmamap = txd->tx_dmamap;
 2904         txd->tx_dmamap = map;
 2905         txd->tx_m = m;
 2906 
 2907         /* Sync descriptors. */
 2908         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
 2909         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
 2910             sc_if->msk_cdata.msk_tx_ring_map,
 2911             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2912 
 2913         return (0);
 2914 }
 2915 
 2916 static void
 2917 msk_start(struct ifnet *ifp)
 2918 {
 2919         struct msk_if_softc *sc_if;
 2920 
 2921         sc_if = ifp->if_softc;
 2922         MSK_IF_LOCK(sc_if);
 2923         msk_start_locked(ifp);
 2924         MSK_IF_UNLOCK(sc_if);
 2925 }
 2926 
 2927 static void
 2928 msk_start_locked(struct ifnet *ifp)
 2929 {
 2930         struct msk_if_softc *sc_if;
 2931         struct mbuf *m_head;
 2932         int enq;
 2933 
 2934         sc_if = ifp->if_softc;
 2935         MSK_IF_LOCK_ASSERT(sc_if);
 2936 
 2937         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2938             IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
 2939                 return;
 2940 
 2941         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2942             sc_if->msk_cdata.msk_tx_cnt <
 2943             (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
 2944                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2945                 if (m_head == NULL)
 2946                         break;
 2947                 /*
 2948                  * Pack the data into the transmit ring. If we
 2949                  * don't have room, set the OACTIVE flag and wait
 2950                  * for the NIC to drain the ring.
 2951                  */
 2952                 if (msk_encap(sc_if, &m_head) != 0) {
 2953                         if (m_head == NULL)
 2954                                 break;
 2955                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2956                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2957                         break;
 2958                 }
 2959 
 2960                 enq++;
 2961                 /*
 2962                  * If there's a BPF listener, bounce a copy of this frame
 2963                  * to him.
 2964                  */
 2965                 ETHER_BPF_MTAP(ifp, m_head);
 2966         }
 2967 
 2968         if (enq > 0) {
 2969                 /* Transmit */
 2970                 CSR_WRITE_2(sc_if->msk_softc,
 2971                     Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
 2972                     sc_if->msk_cdata.msk_tx_prod);
 2973 
 2974                 /* Set a timeout in case the chip goes out to lunch. */
 2975                 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
 2976         }
 2977 }
 2978 
 2979 static void
 2980 msk_watchdog(struct msk_if_softc *sc_if)
 2981 {
 2982         struct ifnet *ifp;
 2983 
 2984         MSK_IF_LOCK_ASSERT(sc_if);
 2985 
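              /* Nothing to do if the timer is not armed or has not expired yet. */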
 2986         if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
 2987                 return;
 2988         ifp = sc_if->msk_ifp;
 2989         if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
 2990                 if (bootverbose)
 2991                         if_printf(sc_if->msk_ifp, "watchdog timeout "
 2992                            "(missed link)\n");
 2993                 ifp->if_oerrors++;
 2994                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2995                 msk_init_locked(sc_if);
 2996                 return;
 2997         }
 2998 
 2999         if_printf(ifp, "watchdog timeout\n");
 3000         ifp->if_oerrors++;
 3001         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 3002         msk_init_locked(sc_if);
 3003         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 3004                 msk_start_locked(ifp);
 3005 }
 3006 
 3007 static int
 3008 mskc_shutdown(device_t dev)
 3009 {
 3010         struct msk_softc *sc;
 3011         int i;
 3012 
 3013         sc = device_get_softc(dev);
 3014         MSK_LOCK(sc);
 3015         for (i = 0; i < sc->msk_num_port; i++) {
 3016                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3017                     ((sc->msk_if[i]->msk_ifp->if_drv_flags &
 3018                     IFF_DRV_RUNNING) != 0))
 3019                         msk_stop(sc->msk_if[i]);
 3020         }
 3021         MSK_UNLOCK(sc);
 3022 
 3023         /* Put the hardware into reset. */
 3024         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 3025         return (0);
 3026 }
 3027 
 3028 static int
 3029 mskc_suspend(device_t dev)
 3030 {
 3031         struct msk_softc *sc;
 3032         int i;
 3033 
 3034         sc = device_get_softc(dev);
 3035 
 3036         MSK_LOCK(sc);
 3037 
 3038         for (i = 0; i < sc->msk_num_port; i++) {
 3039                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3040                     ((sc->msk_if[i]->msk_ifp->if_drv_flags &
 3041                     IFF_DRV_RUNNING) != 0))
 3042                         msk_stop(sc->msk_if[i]);
 3043         }
 3044 
 3045         /* Disable all interrupts. */
 3046         CSR_WRITE_4(sc, B0_IMSK, 0);
 3047         CSR_READ_4(sc, B0_IMSK);
 3048         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 3049         CSR_READ_4(sc, B0_HWE_IMSK);
 3050 
 3051         msk_phy_power(sc, MSK_PHY_POWERDOWN);
 3052 
 3053         /* Put the hardware into reset. */
 3054         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 3055         sc->msk_pflags |= MSK_FLAG_SUSPEND;
 3056 
 3057         MSK_UNLOCK(sc);
 3058 
 3059         return (0);
 3060 }
 3061 
 3062 static int
 3063 mskc_resume(device_t dev)
 3064 {
 3065         struct msk_softc *sc;
 3066         int i;
 3067 
 3068         sc = device_get_softc(dev);
 3069 
 3070         MSK_LOCK(sc);
 3071 
 3072         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
 3073         mskc_reset(sc);
 3074         for (i = 0; i < sc->msk_num_port; i++) {
 3075                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3076                     ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
 3077                         sc->msk_if[i]->msk_ifp->if_drv_flags &=
 3078                             ~IFF_DRV_RUNNING;
 3079                         msk_init_locked(sc->msk_if[i]);
 3080                 }
 3081         }
 3082         sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
 3083 
 3084         MSK_UNLOCK(sc);
 3085 
 3086         return (0);
 3087 }
 3088 
 3089 #ifndef __NO_STRICT_ALIGNMENT
 3090 static __inline void
 3091 msk_fixup_rx(struct mbuf *m)
 3092 {
 3093         int i;
 3094         uint16_t *src, *dst;
 3095 
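              /*
               * The frame was DMAed into a buffer aligned to MSK_RX_BUF_ALIGN
               * (8 bytes); move it back by MSK_RX_BUF_ALIGN - ETHER_ALIGN
               * bytes (three 16-bit words) so the IP header ends up 32-bit
               * aligned on strict-alignment machines.
               */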
 3096         src = mtod(m, uint16_t *);
 3097         dst = src - 3;
 3098 
 3099         for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
 3100                 *dst++ = *src++;
 3101 
 3102         m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
 3103 }
 3104 #endif
 3105 
 3106 static __inline void
 3107 msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
 3108 {
 3109         struct ether_header *eh;
 3110         struct ip *ip;
 3111         struct udphdr *uh;
 3112         int32_t hlen, len, pktlen, temp32;
 3113         uint16_t csum, *opts;
 3114 
 3115         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
 3116                 if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
 3117                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 3118                         if ((control & CSS_IPV4_CSUM_OK) != 0)
 3119                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 3120                         if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
 3121                             (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
 3122                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
 3123                                     CSUM_PSEUDO_HDR;
 3124                                 m->m_pkthdr.csum_data = 0xffff;
 3125                         }
 3126                 }
 3127                 return;
 3128         }
 3129         /*
 3130          * Marvell Yukon controllers that support OP_RXCHKS are known
 3131          * to have various Rx checksum offloading bugs. These
 3132          * controllers can be configured to compute a simple checksum
 3133          * at two different positions, so IP and TCP/UDP checksums can
 3134          * be computed at the same time. We intentionally have the
 3135          * controller compute the TCP/UDP checksum twice by specifying
 3136          * the same checksum start position and compare the results; if
 3137          * they differ, the hardware logic computed a wrong value.
 3138          */
 3139         if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
 3140                 if (bootverbose)
 3141                         device_printf(sc_if->msk_if_dev,
 3142                             "Rx checksum value mismatch!\n");
 3143                 return;
 3144         }
 3145         pktlen = m->m_pkthdr.len;
 3146         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 3147                 return;
 3148         eh = mtod(m, struct ether_header *);
 3149         if (eh->ether_type != htons(ETHERTYPE_IP))
 3150                 return;
 3151         ip = (struct ip *)(eh + 1);
 3152         if (ip->ip_v != IPVERSION)
 3153                 return;
 3154 
 3155         hlen = ip->ip_hl << 2;
 3156         pktlen -= sizeof(struct ether_header);
 3157         if (hlen < sizeof(struct ip))
 3158                 return;
 3159         if (ntohs(ip->ip_len) < hlen)
 3160                 return;
 3161         if (ntohs(ip->ip_len) != pktlen)
 3162                 return;
 3163         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 3164                 return; /* can't handle fragmented packet. */
 3165 
 3166         switch (ip->ip_p) {
 3167         case IPPROTO_TCP:
 3168                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 3169                         return;
 3170                 break;
 3171         case IPPROTO_UDP:
 3172                 if (pktlen < (hlen + sizeof(struct udphdr)))
 3173                         return;
 3174                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 3175                 if (uh->uh_sum == 0)
 3176                         return; /* no checksum */
 3177                 break;
 3178         default:
 3179                 return;
 3180         }
 3181         csum = bswap16(sc_if->msk_csum & 0xFFFF);
 3182         /* Checksum fixup for IP options. */
 3183         len = hlen - sizeof(struct ip);
 3184         if (len > 0) {
 3185                 opts = (uint16_t *)(ip + 1);
 3186                 for (; len > 0; len -= sizeof(uint16_t), opts++) {
 3187                         temp32 = csum - *opts;
 3188                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 3189                         csum = temp32 & 65535;
 3190                 }
 3191         }
 3192         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 3193         m->m_pkthdr.csum_data = csum;
 3194 }
 3195 
 3196 static void
 3197 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
 3198     int len)
 3199 {
 3200         struct mbuf *m;
 3201         struct ifnet *ifp;
 3202         struct msk_rxdesc *rxd;
 3203         int cons, rxlen;
 3204 
 3205         ifp = sc_if->msk_ifp;
 3206 
 3207         MSK_IF_LOCK_ASSERT(sc_if);
 3208 
 3209         cons = sc_if->msk_cdata.msk_rx_cons;
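              /*
               * The do {} while (0) construct lets the error paths break out
               * early while still falling through to the ring index updates
               * below.
               */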
 3210         do {
 3211                 rxlen = status >> 16;
 3212                 if ((status & GMR_FS_VLAN) != 0 &&
 3213                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3214                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 3215                 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
 3216                         /*
 3217                          * For controllers that return a bogus status code,
 3218                          * just do a minimal check and let the upper stack
 3219                          * handle this frame.
 3220                          */
 3221                         if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
 3222                                 ifp->if_ierrors++;
 3223                                 msk_discard_rxbuf(sc_if, cons);
 3224                                 break;
 3225                         }
 3226                 } else if (len > sc_if->msk_framesize ||
 3227                     ((status & GMR_FS_ANY_ERR) != 0) ||
 3228                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 3229                         /* Don't count flow-control packets as errors. */
 3230                         if ((status & GMR_FS_GOOD_FC) == 0)
 3231                                 ifp->if_ierrors++;
 3232                         msk_discard_rxbuf(sc_if, cons);
 3233                         break;
 3234                 }
 3235 #ifdef MSK_64BIT_DMA
 3236                 rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) %
 3237                     MSK_RX_RING_CNT];
 3238 #else
 3239                 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
 3240 #endif
 3241                 m = rxd->rx_m;
 3242                 if (msk_newbuf(sc_if, cons) != 0) {
 3243                         ifp->if_iqdrops++;
 3244                         /* Reuse old buffer. */
 3245                         msk_discard_rxbuf(sc_if, cons);
 3246                         break;
 3247                 }
 3248                 m->m_pkthdr.rcvif = ifp;
 3249                 m->m_pkthdr.len = m->m_len = len;
 3250 #ifndef __NO_STRICT_ALIGNMENT
 3251                 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 3252                         msk_fixup_rx(m);
 3253 #endif
 3254                 ifp->if_ipackets++;
 3255                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 3256                         msk_rxcsum(sc_if, control, m);
 3257                 /* Check for VLAN tagged packets. */
 3258                 if ((status & GMR_FS_VLAN) != 0 &&
 3259                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 3260                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 3261                         m->m_flags |= M_VLANTAG;
 3262                 }
 3263                 MSK_IF_UNLOCK(sc_if);
 3264                 (*ifp->if_input)(ifp, m);
 3265                 MSK_IF_LOCK(sc_if);
 3266         } while (0);
 3267 
 3268         MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
 3269         MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
 3270 }
 3271 
 3272 static void
 3273 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
 3274     int len)
 3275 {
 3276         struct mbuf *m;
 3277         struct ifnet *ifp;
 3278         struct msk_rxdesc *jrxd;
 3279         int cons, rxlen;
 3280 
 3281         ifp = sc_if->msk_ifp;
 3282 
 3283         MSK_IF_LOCK_ASSERT(sc_if);
 3284 
 3285         cons = sc_if->msk_cdata.msk_rx_cons;
 3286         do {
 3287                 rxlen = status >> 16;
 3288                 if ((status & GMR_FS_VLAN) != 0 &&
 3289                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3290                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 3291                 if (len > sc_if->msk_framesize ||
 3292                     ((status & GMR_FS_ANY_ERR) != 0) ||
 3293                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 3294                         /* Don't count flow-control packets as errors. */
 3295                         if ((status & GMR_FS_GOOD_FC) == 0)
 3296                                 ifp->if_ierrors++;
 3297                         msk_discard_jumbo_rxbuf(sc_if, cons);
 3298                         break;
 3299                 }
 3300 #ifdef MSK_64BIT_DMA
 3301                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) %
 3302                     MSK_JUMBO_RX_RING_CNT];
 3303 #else
 3304                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
 3305 #endif
 3306                 m = jrxd->rx_m;
 3307                 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
 3308                         ifp->if_iqdrops++;
 3309                         /* Reuse old buffer. */
 3310                         msk_discard_jumbo_rxbuf(sc_if, cons);
 3311                         break;
 3312                 }
 3313                 m->m_pkthdr.rcvif = ifp;
 3314                 m->m_pkthdr.len = m->m_len = len;
 3315 #ifndef __NO_STRICT_ALIGNMENT
 3316                 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 3317                         msk_fixup_rx(m);
 3318 #endif
 3319                 ifp->if_ipackets++;
 3320                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 3321                         msk_rxcsum(sc_if, control, m);
 3322                 /* Check for VLAN tagged packets. */
 3323                 if ((status & GMR_FS_VLAN) != 0 &&
 3324                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 3325                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 3326                         m->m_flags |= M_VLANTAG;
 3327                 }
 3328                 MSK_IF_UNLOCK(sc_if);
 3329                 (*ifp->if_input)(ifp, m);
 3330                 MSK_IF_LOCK(sc_if);
 3331         } while (0);
 3332 
 3333         MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
 3334         MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
 3335 }
 3336 
 3337 static void
 3338 msk_txeof(struct msk_if_softc *sc_if, int idx)
 3339 {
 3340         struct msk_txdesc *txd;
 3341         struct msk_tx_desc *cur_tx;
 3342         struct ifnet *ifp;
 3343         uint32_t control;
 3344         int cons, prog;
 3345 
 3346         MSK_IF_LOCK_ASSERT(sc_if);
 3347 
 3348         ifp = sc_if->msk_ifp;
 3349 
 3350         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
 3351             sc_if->msk_cdata.msk_tx_ring_map,
 3352             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3353         /*
 3354          * Go through our tx ring and free mbufs for those
 3355          * frames that have been sent.
 3356          */
 3357         cons = sc_if->msk_cdata.msk_tx_cons;
 3358         prog = 0;
 3359         for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
 3360                 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
 3361                         break;
 3362                 prog++;
 3363                 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
 3364                 control = le32toh(cur_tx->msk_control);
 3365                 sc_if->msk_cdata.msk_tx_cnt--;
 3366                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3367                 if ((control & EOP) == 0)
 3368                         continue;
 3369                 txd = &sc_if->msk_cdata.msk_txdesc[cons];
 3370                 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
 3371                     BUS_DMASYNC_POSTWRITE);
 3372                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
 3373 
 3374                 ifp->if_opackets++;
 3375                 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
 3376                     __func__));
 3377                 m_freem(txd->tx_m);
 3378                 txd->tx_m = NULL;
 3379         }
 3380 
 3381         if (prog > 0) {
 3382                 sc_if->msk_cdata.msk_tx_cons = cons;
 3383                 if (sc_if->msk_cdata.msk_tx_cnt == 0)
 3384                         sc_if->msk_watchdog_timer = 0;
 3385                 /* No need to sync LEs as we didn't update LEs. */
 3386         }
 3387 }
 3388 
 3389 static void
 3390 msk_tick(void *xsc_if)
 3391 {
 3392         struct msk_if_softc *sc_if;
 3393         struct mii_data *mii;
 3394 
 3395         sc_if = xsc_if;
 3396 
 3397         MSK_IF_LOCK_ASSERT(sc_if);
 3398 
 3399         mii = device_get_softc(sc_if->msk_miibus);
 3400 
 3401         mii_tick(mii);
 3402         if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
 3403                 msk_miibus_statchg(sc_if->msk_if_dev);
 3404         msk_handle_events(sc_if->msk_softc);
 3405         msk_watchdog(sc_if);
 3406         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 3407 }
 3408 
 3409 static void
 3410 msk_intr_phy(struct msk_if_softc *sc_if)
 3411 {
 3412         uint16_t status;
 3413 
 3414         msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 3415         status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 3416         /* Handle FIFO Underrun/Overflow? */
 3417         if ((status & PHY_M_IS_FIFO_ERROR))
 3418                 device_printf(sc_if->msk_if_dev,
 3419                     "PHY FIFO underrun/overflow.\n");
 3420 }
 3421 
 3422 static void
 3423 msk_intr_gmac(struct msk_if_softc *sc_if)
 3424 {
 3425         struct msk_softc *sc;
 3426         uint8_t status;
 3427 
 3428         sc = sc_if->msk_softc;
 3429         status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3430 
 3431         /* GMAC Rx FIFO overrun. */
 3432         if ((status & GM_IS_RX_FF_OR) != 0)
 3433                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 3434                     GMF_CLI_RX_FO);
 3435         /* GMAC Tx FIFO underrun. */
 3436         if ((status & GM_IS_TX_FF_UR) != 0) {
 3437                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3438                     GMF_CLI_TX_FU);
 3439                 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
 3440                 /*
 3441                  * XXX
 3442                  * In case of a Tx underrun, we may need to flush/reset the
 3443                  * Tx MAC, but that would also require resynchronization
 3444                  * with the status LEs. Reinitializing the status LEs would
 3445                  * affect the other port in a dual MAC configuration, so it
 3446                  * should be avoided as much as possible.
 3447                  * Due to lack of documentation this is all vague guesswork
 3448                  * and needs more investigation.
 3449                  */
 3450         }
 3451 }
 3452 
 3453 static void
 3454 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
 3455 {
 3456         struct msk_softc *sc;
 3457 
 3458         sc = sc_if->msk_softc;
 3459         if ((status & Y2_IS_PAR_RD1) != 0) {
 3460                 device_printf(sc_if->msk_if_dev,
 3461                     "RAM buffer read parity error\n");
 3462                 /* Clear IRQ. */
 3463                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3464                     RI_CLR_RD_PERR);
 3465         }
 3466         if ((status & Y2_IS_PAR_WR1) != 0) {
 3467                 device_printf(sc_if->msk_if_dev,
 3468                     "RAM buffer write parity error\n");
 3469                 /* Clear IRQ. */
 3470                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3471                     RI_CLR_WR_PERR);
 3472         }
 3473         if ((status & Y2_IS_PAR_MAC1) != 0) {
 3474                 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
 3475                 /* Clear IRQ. */
 3476                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3477                     GMF_CLI_TX_PE);
 3478         }
 3479         if ((status & Y2_IS_PAR_RX1) != 0) {
 3480                 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
 3481                 /* Clear IRQ. */
 3482                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
 3483         }
 3484         if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
 3485                 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
 3486                 /* Clear IRQ. */
 3487                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
 3488         }
 3489 }
 3490 
 3491 static void
 3492 msk_intr_hwerr(struct msk_softc *sc)
 3493 {
 3494         uint32_t status;
 3495         uint32_t tlphead[4];
 3496 
 3497         status = CSR_READ_4(sc, B0_HWE_ISRC);
 3498         /* Time Stamp timer overflow. */
 3499         if ((status & Y2_IS_TIST_OV) != 0)
 3500                 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 3501         if ((status & Y2_IS_PCI_NEXP) != 0) {
 3502                 /*
 3503                  * A PCI Express error occurred which is not described in
 3504                  * the PEX spec.
 3505                  * This error is also mapped to either the Master Abort
 3506                  * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
 3507                  * can only be cleared there.
 3508                  */
 3509                 device_printf(sc->msk_dev,
 3510                     "PCI Express protocol violation error\n");
 3511         }
 3512 
 3513         if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
 3514                 uint16_t v16;
 3515 
 3516                 if ((status & Y2_IS_MST_ERR) != 0)
 3517                         device_printf(sc->msk_dev,
 3518                             "unexpected IRQ Status error\n");
 3519                 else
 3520                         device_printf(sc->msk_dev,
 3521                             "unexpected IRQ Master error\n");
 3522                 /* Reset all bits in the PCI status register. */
 3523                 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 3524                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3525                 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
 3526                     PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 3527                     PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
 3528                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3529         }
 3530 
 3531         /* Check for PCI Express Uncorrectable Error. */
 3532         if ((status & Y2_IS_PCI_EXP) != 0) {
 3533                 uint32_t v32;
 3534 
 3535                 /*
 3536                  * On the PCI Express bus, bridges are called root complexes
 3537                  * (RC). PCI Express errors are recognized by the root
 3538                  * complex too, which requests the system to handle the
 3539                  * problem. After the error occurs it may be that no access
 3540                  * to the adapter can be performed any longer.
 3541                  */
 3542 
 3543                 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 3544                 if ((v32 & PEX_UNSUP_REQ) != 0) {
 3545                         /* Ignore unsupported request error. */
 3546                         device_printf(sc->msk_dev,
 3547                             "Uncorrectable PCI Express error\n");
 3548                 }
 3549                 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
 3550                         int i;
 3551 
 3552                         /* Get TLP header from the Log Registers. */
 3553                         for (i = 0; i < 4; i++)
 3554                                 tlphead[i] = CSR_PCI_READ_4(sc,
 3555                                     PEX_HEADER_LOG + i * 4);
 3556                         /* Check for vendor defined broadcast message. */
 3557                         if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
 3558                                 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 3559                                 CSR_WRITE_4(sc, B0_HWE_IMSK,
 3560                                     sc->msk_intrhwemask);
 3561                                 CSR_READ_4(sc, B0_HWE_IMSK);
 3562                         }
 3563                 }
 3564                 /* Clear the interrupt. */
 3565                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3566                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 3567                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3568         }
 3569 
 3570         if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
 3571                 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
 3572         if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
 3573                 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
 3574 }
 3575 
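      /*
       * Sync the active Rx ring and hand the updated producer index to the
       * chip's prefetch unit.
       */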
 3576 static __inline void
 3577 msk_rxput(struct msk_if_softc *sc_if)
 3578 {
 3579         struct msk_softc *sc;
 3580 
 3581         sc = sc_if->msk_softc;
 3582         if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
 3583                 bus_dmamap_sync(
 3584                     sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 3585                     sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 3586                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3587         else
 3588                 bus_dmamap_sync(
 3589                     sc_if->msk_cdata.msk_rx_ring_tag,
 3590                     sc_if->msk_cdata.msk_rx_ring_map,
 3591                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3592         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
 3593             PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
 3594 }
 3595 
 3596 static int
 3597 msk_handle_events(struct msk_softc *sc)
 3598 {
 3599         struct msk_if_softc *sc_if;
 3600         int rxput[2];
 3601         struct msk_stat_desc *sd;
 3602         uint32_t control, status;
 3603         int cons, len, port, rxprog;
 3604 
 3605         if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
 3606                 return (0);
 3607 
 3608         /* Sync status LEs. */
 3609         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 3610             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3611 
 3612         rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
 3613         rxprog = 0;
 3614         cons = sc->msk_stat_cons;
 3615         for (;;) {
 3616                 sd = &sc->msk_stat_ring[cons];
 3617                 control = le32toh(sd->msk_control);
 3618                 if ((control & HW_OWNER) == 0)
 3619                         break;
 3620                 control &= ~HW_OWNER;
 3621                 sd->msk_control = htole32(control);
 3622                 status = le32toh(sd->msk_status);
 3623                 len = control & STLE_LEN_MASK;
 3624                 port = (control >> 16) & 0x01;
 3625                 sc_if = sc->msk_if[port];
 3626                 if (sc_if == NULL) {
 3627                         device_printf(sc->msk_dev, "invalid port opcode "
 3628                             "0x%08x\n", control & STLE_OP_MASK);
 3629                         continue;
 3630                 }
 3631 
 3632                 switch (control & STLE_OP_MASK) {
 3633                 case OP_RXVLAN:
 3634                         sc_if->msk_vtag = ntohs(len);
 3635                         break;
 3636                 case OP_RXCHKSVLAN:
 3637                         sc_if->msk_vtag = ntohs(len);
 3638                         /* FALLTHROUGH */
 3639                 case OP_RXCHKS:
 3640                         sc_if->msk_csum = status;
 3641                         break;
 3642                 case OP_RXSTAT:
 3643                         if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
 3644                                 break;
 3645                         if (sc_if->msk_framesize >
 3646                             (MCLBYTES - MSK_RX_BUF_ALIGN))
 3647                                 msk_jumbo_rxeof(sc_if, status, control, len);
 3648                         else
 3649                                 msk_rxeof(sc_if, status, control, len);
 3650                         rxprog++;
 3651                         /*
 3652                          * Because there is no way to sync a single Rx LE,
 3653                          * put the DMA sync operation off until the end of
 3654                          * event processing.
 3655                          */
 3656                         rxput[port]++;
 3657                         /* Update the prefetch unit if we've passed the water mark. */
 3658                         if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
 3659                                 msk_rxput(sc_if);
 3660                                 rxput[port] = 0;
 3661                         }
 3662                         break;
 3663                 case OP_TXINDEXLE:
 3664                         if (sc->msk_if[MSK_PORT_A] != NULL)
 3665                                 msk_txeof(sc->msk_if[MSK_PORT_A],
 3666                                     status & STLE_TXA1_MSKL);
 3667                         if (sc->msk_if[MSK_PORT_B] != NULL)
 3668                                 msk_txeof(sc->msk_if[MSK_PORT_B],
 3669                                     ((status & STLE_TXA2_MSKL) >>
 3670                                     STLE_TXA2_SHIFTL) |
 3671                                     ((len & STLE_TXA2_MSKH) <<
 3672                                     STLE_TXA2_SHIFTH));
 3673                         break;
 3674                 default:
 3675                         device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
 3676                             control & STLE_OP_MASK);
 3677                         break;
 3678                 }
 3679                 MSK_INC(cons, sc->msk_stat_count);
 3680                 if (rxprog > sc->msk_process_limit)
 3681                         break;
 3682         }
 3683 
 3684         sc->msk_stat_cons = cons;
 3685         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 3686             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3687 
 3688         if (rxput[MSK_PORT_A] > 0)
 3689                 msk_rxput(sc->msk_if[MSK_PORT_A]);
 3690         if (rxput[MSK_PORT_B] > 0)
 3691                 msk_rxput(sc->msk_if[MSK_PORT_B]);
 3692 
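              /* Report whether more status LEs arrived while we were processing. */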
 3693         return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
 3694 }
 3695 
 3696 static void
 3697 msk_intr(void *xsc)
 3698 {
 3699         struct msk_softc *sc;
 3700         struct msk_if_softc *sc_if0, *sc_if1;
 3701         struct ifnet *ifp0, *ifp1;
 3702         uint32_t status;
 3703         int domore;
 3704 
 3705         sc = xsc;
 3706         MSK_LOCK(sc);
 3707 
 3708         /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
 3709         status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
 3710         if (status == 0 || status == 0xffffffff ||
 3711             (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
 3712             (status & sc->msk_intrmask) == 0) {
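                      /* Not ours (or suspended); reenable interrupts and return. */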
 3713                 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3714                 MSK_UNLOCK(sc);
 3715                 return;
 3716         }
 3717 
 3718         sc_if0 = sc->msk_if[MSK_PORT_A];
 3719         sc_if1 = sc->msk_if[MSK_PORT_B];
 3720         ifp0 = ifp1 = NULL;
 3721         if (sc_if0 != NULL)
 3722                 ifp0 = sc_if0->msk_ifp;
 3723         if (sc_if1 != NULL)
 3724                 ifp1 = sc_if1->msk_ifp;
 3725 
 3726         if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
 3727                 msk_intr_phy(sc_if0);
 3728         if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
 3729                 msk_intr_phy(sc_if1);
 3730         if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
 3731                 msk_intr_gmac(sc_if0);
 3732         if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
 3733                 msk_intr_gmac(sc_if1);
 3734         if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
 3735                 device_printf(sc->msk_dev, "Rx descriptor error\n");
 3736                 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
 3737                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3738                 CSR_READ_4(sc, B0_IMSK);
 3739         }
 3740         if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
 3741                 device_printf(sc->msk_dev, "Tx descriptor error\n");
 3742                 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
 3743                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3744                 CSR_READ_4(sc, B0_IMSK);
 3745         }
 3746         if ((status & Y2_IS_HW_ERR) != 0)
 3747                 msk_intr_hwerr(sc);
 3748 
 3749         domore = msk_handle_events(sc);
 3750         if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
 3751                 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
 3752 
 3753         /* Reenable interrupts. */
 3754         CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3755 
 3756         if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3757             !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
 3758                 msk_start_locked(ifp0);
 3759         if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3760             !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
 3761                 msk_start_locked(ifp1);
 3762 
 3763         MSK_UNLOCK(sc);
 3764 }
 3765 
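      /*
       * Configure Tx store-and-forward.  Newer MACs (Yukon Extreme past rev A0
       * and Yukon Supreme or later) always run with store-and-forward enabled;
       * on older MACs it must be disabled when jumbo frames are in use, with
       * the Tx GMAC FIFO almost-empty threshold adjusted instead.
       */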
 3766 static void
 3767 msk_set_tx_stfwd(struct msk_if_softc *sc_if)
 3768 {
 3769         struct msk_softc *sc;
 3770         struct ifnet *ifp;
 3771 
 3772         ifp = sc_if->msk_ifp;
 3773         sc = sc_if->msk_softc;
 3774         if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
 3775             sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
 3776             sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
 3777                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3778                     TX_STFW_ENA);
 3779         } else {
 3780                 if (ifp->if_mtu > ETHERMTU) {
 3781                         /* Set Tx GMAC FIFO Almost Empty Threshold. */
 3782                         CSR_WRITE_4(sc,
 3783                             MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
 3784                             MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
 3785                         /* Disable Store & Forward mode for Tx. */
 3786                         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3787                             TX_STFW_DIS);
 3788                 } else {
 3789                         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3790                             TX_STFW_ENA);
 3791                 }
 3792         }
 3793 }
 3794 
 3795 static void
 3796 msk_init(void *xsc)
 3797 {
 3798         struct msk_if_softc *sc_if = xsc;
 3799 
 3800         MSK_IF_LOCK(sc_if);
 3801         msk_init_locked(sc_if);
 3802         MSK_IF_UNLOCK(sc_if);
 3803 }
 3804 
 3805 static void
 3806 msk_init_locked(struct msk_if_softc *sc_if)
 3807 {
 3808         struct msk_softc *sc;
 3809         struct ifnet *ifp;
 3810         struct mii_data  *mii;
 3811         uint8_t *eaddr;
 3812         uint16_t gmac;
 3813         uint32_t reg;
 3814         int error;
 3815 
 3816         MSK_IF_LOCK_ASSERT(sc_if);
 3817 
 3818         ifp = sc_if->msk_ifp;
 3819         sc = sc_if->msk_softc;
 3820         mii = device_get_softc(sc_if->msk_miibus);
 3821 
 3822         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 3823                 return;
 3824 
 3825         error = 0;
 3826         /* Cancel pending I/O and free all Rx/Tx buffers. */
 3827         msk_stop(sc_if);
 3828 
 3829         if (ifp->if_mtu < ETHERMTU)
 3830                 sc_if->msk_framesize = ETHERMTU;
 3831         else
 3832                 sc_if->msk_framesize = ifp->if_mtu;
 3833         sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 3834         if (ifp->if_mtu > ETHERMTU &&
 3835             (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
 3836                 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
 3837                 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
 3838         }
 3839 
 3840         /* GMAC Control reset. */
 3841         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
 3842         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
 3843         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
 3844         if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 3845             sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
 3846                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
 3847                     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 3848                     GMC_BYP_RETR_ON);
 3849 
 3850         /*
 3851          * Initialize GMAC first such that speed/duplex/flow-control
 3852          * parameters are renegotiated when the interface is brought up.
 3853          */
 3854         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
 3855 
 3856         /* Dummy read of the Interrupt Source Register. */
 3857         CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3858 
 3859         /* Clear MIB stats. */
 3860         msk_stats_clear(sc_if);
 3861 
 3862         /* Disable FCS. */
 3863         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
 3864 
 3865         /* Setup Transmit Control Register. */
 3866         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
 3867 
 3868         /* Setup Transmit Flow Control Register. */
 3869         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
 3870 
 3871         /* Setup Transmit Parameter Register. */
 3872         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
 3873             TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
 3874             TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
 3875 
 3876         gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
 3877             GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
 3878 
 3879         if (ifp->if_mtu > ETHERMTU)
 3880                 gmac |= GM_SMOD_JUMBO_ENA;
 3881         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
 3882 
 3883         /* Set station address. */
 3884         eaddr = IF_LLADDR(ifp);
 3885         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
 3886             eaddr[0] | (eaddr[1] << 8));
 3887         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
 3888             eaddr[2] | (eaddr[3] << 8));
 3889         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
 3890             eaddr[4] | (eaddr[5] << 8));
 3891         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
 3892             eaddr[0] | (eaddr[1] << 8));
 3893         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
 3894             eaddr[2] | (eaddr[3] << 8));
 3895         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
 3896             eaddr[4] | (eaddr[5] << 8));
 3897 
 3898         /* Disable interrupts for counter overflows. */
 3899         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
 3900         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
 3901         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
 3902 
 3903         /* Configure Rx MAC FIFO. */
 3904         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 3905         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
 3906         reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
 3907         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
 3908             sc->msk_hw_id == CHIP_ID_YUKON_EX)
 3909                 reg |= GMF_RX_OVER_ON;
 3910         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
 3911 
 3912         /* Set receive filter. */
 3913         msk_rxfilter(sc_if);
 3914 
 3915         if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
 3916                 /* Clear flush mask - HW bug. */
 3917                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
 3918         } else {
 3919                 /* Flush Rx MAC FIFO on any flow control or error. */
 3920                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
 3921                     GMR_FS_ANY_ERR);
 3922         }
 3923 
 3924         /*
 3925          * Set the Rx FIFO flush threshold to 64 bytes + 1 FIFO word
 3926          * to work around a hardware hang on receipt of pause frames.
 3927          */
 3928         reg = RX_GMF_FL_THR_DEF + 1;
 3929         /* Another magic value for Yukon FE+, taken from Linux. */
 3930         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3931             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
 3932                 reg = 0x178;
 3933         CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
 3934 
 3935         /* Configure Tx MAC FIFO. */
 3936         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 3937         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
 3938         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
 3939 
 3940         /* Configure hardware VLAN tag insertion/stripping. */
 3941         msk_setvlan(sc_if, ifp);
 3942 
 3943         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
 3944                 /* Set Rx Pause threshold. */
 3945                 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
 3946                     MSK_ECU_LLPP);
 3947                 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
 3948                     MSK_ECU_ULPP);
 3949                 /* Configure store-and-forward for Tx. */
 3950                 msk_set_tx_stfwd(sc_if);
 3951         }
 3952 
 3953         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3954             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
 3955                 /* Disable dynamic watermark - from Linux. */
 3956                 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
 3957                 reg &= ~0x03;
 3958                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
 3959         }
 3960 
 3961         /*
 3962          * Disable Force Sync bit and Alloc bit in Tx RAM interface
 3963          * arbiter as we don't use Sync Tx queue.
 3964          */
 3965         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
 3966             TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
 3967         /* Enable the RAM Interface Arbiter. */
 3968         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
 3969 
 3970         /* Setup RAM buffer. */
 3971         msk_set_rambuffer(sc_if);
 3972 
 3973         /* Disable Tx sync Queue. */
 3974         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
 3975 
 3976         /* Setup Tx Queue Bus Memory Interface. */
 3977         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
 3978         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
 3979         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
 3980         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
 3981         switch (sc->msk_hw_id) {
 3982         case CHIP_ID_YUKON_EC_U:
 3983                 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
 3984                         /* Fix for Yukon-EC Ultra: set BMU FIFO level */
 3985                         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
 3986                             MSK_ECU_TXFF_LEV);
 3987                 }
 3988                 break;
 3989         case CHIP_ID_YUKON_EX:
 3990                 /*
 3991                  * Yukon Extreme seems to have silicon bug for
 3992                  * automatic Tx checksum calculation capability.
 3993                  */
 3994                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
 3995                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
 3996                             F_TX_CHK_AUTO_OFF);
 3997                 break;
 3998         }
 3999 
 4000         /* Setup Rx Queue Bus Memory Interface. */
 4001         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
 4002         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
 4003         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
 4004         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
 4005         if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
 4006             sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
 4007                 /* MAC Rx RAM Read is controlled by hardware. */
 4008                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
 4009         }
 4010 
 4011         msk_set_prefetch(sc, sc_if->msk_txq,
 4012             sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
 4013         msk_init_tx_ring(sc_if);
 4014 
 4015         /* Disable RSS hash and configure Rx checksum offload. */
 4016         reg = BMU_DIS_RX_RSS_HASH;
 4017         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 4018             (ifp->if_capenable & IFCAP_RXCSUM) != 0)
 4019                 reg |= BMU_ENA_RX_CHKSUM;
 4020         else
 4021                 reg |= BMU_DIS_RX_CHKSUM;
 4022         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);
 4023         if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
 4024                 msk_set_prefetch(sc, sc_if->msk_rxq,
 4025                     sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
 4026                     MSK_JUMBO_RX_RING_CNT - 1);
 4027                 error = msk_init_jumbo_rx_ring(sc_if);
 4028         } else {
 4029                 msk_set_prefetch(sc, sc_if->msk_rxq,
 4030                     sc_if->msk_rdata.msk_rx_ring_paddr,
 4031                     MSK_RX_RING_CNT - 1);
 4032                 error = msk_init_rx_ring(sc_if);
 4033         }
 4034         if (error != 0) {
 4035                 device_printf(sc_if->msk_if_dev,
 4036                     "initialization failed: no memory for Rx buffers\n");
 4037                 msk_stop(sc_if);
 4038                 return;
 4039         }
 4040         if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 4041             sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 4042                 /* Disable flushing of non-ASF packets. */
 4043                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 4044                     GMF_RX_MACSEC_FLUSH_OFF);
 4045         }
 4046 
 4047         /* Configure interrupt handling. */
 4048         if (sc_if->msk_port == MSK_PORT_A) {
 4049                 sc->msk_intrmask |= Y2_IS_PORT_A;
 4050                 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
 4051         } else {
 4052                 sc->msk_intrmask |= Y2_IS_PORT_B;
 4053                 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
 4054         }
 4055         /* Configure IRQ moderation mask. */
 4056         CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
 4057         if (sc->msk_int_holdoff > 0) {
 4058                 /* Configure initial IRQ moderation timer value. */
 4059                 CSR_WRITE_4(sc, B2_IRQM_INI,
 4060                     MSK_USECS(sc, sc->msk_int_holdoff));
 4061                 CSR_WRITE_4(sc, B2_IRQM_VAL,
 4062                     MSK_USECS(sc, sc->msk_int_holdoff));
 4063                 /* Start IRQ moderation. */
 4064                 CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
 4065         }
 4066         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 4067         CSR_READ_4(sc, B0_HWE_IMSK);
 4068         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 4069         CSR_READ_4(sc, B0_IMSK);
 4070 
 4071         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 4072         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 4073 
 4074         sc_if->msk_flags &= ~MSK_FLAG_LINK;
 4075         mii_mediachg(mii);
 4076 
 4077         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 4078 }
 4079 
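      /*
       * Program the on-chip RAM buffer queues.  Start/end/read/write pointers
       * and the Rx upper/lower flow-control thresholds are given in units of
       * 8 bytes, and the Tx side is put into store-and-forward mode.
       */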
 4080 static void
 4081 msk_set_rambuffer(struct msk_if_softc *sc_if)
 4082 {
 4083         struct msk_softc *sc;
 4084         int ltpp, utpp;
 4085 
 4086         sc = sc_if->msk_softc;
 4087         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
 4088                 return;
 4089 
 4090         /* Setup Rx Queue. */
 4091         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
 4092         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
 4093             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4094         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
 4095             sc->msk_rxqend[sc_if->msk_port] / 8);
 4096         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
 4097             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4098         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
 4099             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4100 
 4101         utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 4102             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
 4103         ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 4104             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
 4105         if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
 4106                 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
 4107         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
 4108         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
 4109         /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
 4110 
 4111         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
 4112         CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
 4113 
 4114         /* Setup Tx Queue. */
 4115         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
 4116         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
 4117             sc->msk_txqstart[sc_if->msk_port] / 8);
 4118         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
 4119             sc->msk_txqend[sc_if->msk_port] / 8);
 4120         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
 4121             sc->msk_txqstart[sc_if->msk_port] / 8);
 4122         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
 4123             sc->msk_txqstart[sc_if->msk_port] / 8);
 4124         /* Enable Store & Forward for Tx side. */
 4125         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
 4126         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
 4127         CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
 4128 }
 4129 
 4130 static void
 4131 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
 4132     uint32_t count)
 4133 {
 4134 
 4135         /* Reset the prefetch unit. */
 4136         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4137             PREF_UNIT_RST_SET);
 4138         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4139             PREF_UNIT_RST_CLR);
 4140         /* Set LE base address. */
 4141         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
 4142             MSK_ADDR_LO(addr));
 4143         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
 4144             MSK_ADDR_HI(addr));
 4145         /* Set the list last index. */
 4146         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
 4147             count);
 4148         /* Turn on prefetch unit. */
 4149         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4150             PREF_UNIT_OP_ON);
 4151         /* Dummy read to ensure write. */
 4152         CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
 4153 }
 4154 
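      /*
       * Stop the interface: disable port interrupts and the Tx/Rx MAC, stop
       * and reset the BMUs, prefetch units, RAM buffers and MAC FIFOs, and
       * free any mbufs still attached to the Rx, jumbo Rx and Tx rings.
       */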
 4155 static void
 4156 msk_stop(struct msk_if_softc *sc_if)
 4157 {
 4158         struct msk_softc *sc;
 4159         struct msk_txdesc *txd;
 4160         struct msk_rxdesc *rxd;
 4161         struct msk_rxdesc *jrxd;
 4162         struct ifnet *ifp;
 4163         uint32_t val;
 4164         int i;
 4165 
 4166         MSK_IF_LOCK_ASSERT(sc_if);
 4167         sc = sc_if->msk_softc;
 4168         ifp = sc_if->msk_ifp;
 4169 
 4170         callout_stop(&sc_if->msk_tick_ch);
 4171         sc_if->msk_watchdog_timer = 0;
 4172 
 4173         /* Disable interrupts. */
 4174         if (sc_if->msk_port == MSK_PORT_A) {
 4175                 sc->msk_intrmask &= ~Y2_IS_PORT_A;
 4176                 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
 4177         } else {
 4178                 sc->msk_intrmask &= ~Y2_IS_PORT_B;
 4179                 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
 4180         }
 4181         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 4182         CSR_READ_4(sc, B0_HWE_IMSK);
 4183         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 4184         CSR_READ_4(sc, B0_IMSK);
 4185 
 4186         /* Disable Tx/Rx MAC. */
 4187         val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 4188         val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
 4189         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
 4190         /* Read back to ensure the write has completed. */
 4191         GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 4192         /* Update stats and clear counters. */
 4193         msk_stats_update(sc_if);
 4194 
 4195         /* Stop Tx BMU. */
 4196         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
 4197         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 4198         for (i = 0; i < MSK_TIMEOUT; i++) {
 4199                 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
 4200                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 4201                             BMU_STOP);
 4202                         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 4203                 } else
 4204                         break;
 4205                 DELAY(1);
 4206         }
 4207         if (i == MSK_TIMEOUT)
 4208                 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
 4209         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
 4210             RB_RST_SET | RB_DIS_OP_MD);
 4211 
 4212         /* Disable all GMAC interrupts. */
 4213         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
 4214         /* Disable PHY interrupt. */
 4215         msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
 4216 
 4217         /* Disable the RAM Interface Arbiter. */
 4218         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
 4219 
 4220         /* Reset the PCI FIFO of the async Tx queue. */
 4221         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 4222             BMU_RST_SET | BMU_FIFO_RST);
 4223 
 4224         /* Reset the Tx prefetch unit. */
 4225         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
 4226             PREF_UNIT_RST_SET);
 4227 
 4228         /* Reset the RAM Buffer async Tx queue. */
 4229         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
 4230 
 4231         /* Reset Tx MAC FIFO. */
 4232         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 4233         /* Set Pause Off. */
 4234         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
 4235 
 4236         /*
 4237          * The Rx Stop command does not work on Yukon-2 unless the BMU has
 4238          * reached the end of a packet, and since we cannot guarantee that
 4239          * no data is incoming, the BMU must be reset only while no DMA
 4240          * transfer is in progress. Because the Rx path may still be
 4241          * active, the Rx RAM buffer is stopped first so that incoming data
 4242          * cannot trigger a new DMA. Once the RAM buffer is stopped, the
 4243          * BMU is polled until any DMA in progress has ended, and only then
 4244          * is it reset.
 4245          */
 4246 
 4247         /* Disable the RAM Buffer receive queue. */
 4248         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
 4249         for (i = 0; i < MSK_TIMEOUT; i++) {
 4250                 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
 4251                     CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
 4252                         break;
 4253                 DELAY(1);
 4254         }
 4255         if (i == MSK_TIMEOUT)
 4256                 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
 4257         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
 4258             BMU_RST_SET | BMU_FIFO_RST);
 4259         /* Reset the Rx prefetch unit. */
 4260         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
 4261             PREF_UNIT_RST_SET);
 4262         /* Reset the RAM Buffer receive queue. */
 4263         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
 4264         /* Reset Rx MAC FIFO. */
 4265         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 4266 
 4267         /* Free Rx and Tx mbufs still in the queues. */
 4268         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 4269                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 4270                 if (rxd->rx_m != NULL) {
 4271                         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
 4272                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 4273                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
 4274                             rxd->rx_dmamap);
 4275                         m_freem(rxd->rx_m);
 4276                         rxd->rx_m = NULL;
 4277                 }
 4278         }
 4279         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 4280                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 4281                 if (jrxd->rx_m != NULL) {
 4282                         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
 4283                             jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 4284                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
 4285                             jrxd->rx_dmamap);
 4286                         m_freem(jrxd->rx_m);
 4287                         jrxd->rx_m = NULL;
 4288                 }
 4289         }
 4290         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 4291                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 4292                 if (txd->tx_m != NULL) {
 4293                         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
 4294                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 4295                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
 4296                             txd->tx_dmamap);
 4297                         m_freem(txd->tx_m);
 4298                         txd->tx_m = NULL;
 4299                 }
 4300         }
 4301 
 4302         /*
 4303          * Mark the interface down.
 4304          */
 4305         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 4306         sc_if->msk_flags &= ~MSK_FLAG_LINK;
 4307 }
 4308 
 4309 /*
 4310  * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the low
 4311  * 16 bits of a counter clears its high 16 bits, so the low word must
 4312  * be read last.
 4313  */
 4314 #define MSK_READ_MIB32(x, y)                                    \
 4315         (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +       \
 4316         (uint32_t)GMAC_READ_2(sc, x, y)
 4317 #define MSK_READ_MIB64(x, y)                                    \
 4318         (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +        \
 4319         (uint64_t)MSK_READ_MIB32(x, y)
 4320 
 4321 static void
 4322 msk_stats_clear(struct msk_if_softc *sc_if)
 4323 {
 4324         struct msk_softc *sc;
 4325         uint32_t reg;
 4326         uint16_t gmac;
 4327         int i;
 4328 
 4329         MSK_IF_LOCK_ASSERT(sc_if);
 4330 
 4331         sc = sc_if->msk_softc;
 4332         /* Set MIB Clear Counter Mode. */
 4333         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 4334         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 4335         /* Read all MIB Counters with Clear Mode set. */
 4336         for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
 4337                 reg = MSK_READ_MIB32(sc_if->msk_port, i);
 4338         /* Clear MIB Clear Counter Mode. */
 4339         gmac &= ~GM_PAR_MIB_CLR;
 4340         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 4341 }
 4342 
 4343 static void
 4344 msk_stats_update(struct msk_if_softc *sc_if)
 4345 {
 4346         struct msk_softc *sc;
 4347         struct ifnet *ifp;
 4348         struct msk_hw_stats *stats;
 4349         uint16_t gmac;
 4350         uint32_t reg;
 4351 
 4352         MSK_IF_LOCK_ASSERT(sc_if);
 4353 
 4354         ifp = sc_if->msk_ifp;
 4355         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 4356                 return;
 4357         sc = sc_if->msk_softc;
 4358         stats = &sc_if->msk_stats;
 4359         /* Set MIB Clear Counter Mode. */
 4360         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 4361         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 4362 
 4363         /* Rx stats. */
 4364         stats->rx_ucast_frames +=
 4365             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
 4366         stats->rx_bcast_frames +=
 4367             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
 4368         stats->rx_pause_frames +=
 4369             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
 4370         stats->rx_mcast_frames +=
 4371             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
 4372         stats->rx_crc_errs +=
 4373             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
 4374         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
 4375         stats->rx_good_octets +=
 4376             MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
 4377         stats->rx_bad_octets +=
 4378             MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
 4379         stats->rx_runts +=
 4380             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
 4381         stats->rx_runt_errs +=
 4382             MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
 4383         stats->rx_pkts_64 +=
 4384             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
 4385         stats->rx_pkts_65_127 +=
 4386             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
 4387         stats->rx_pkts_128_255 +=
 4388             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
 4389         stats->rx_pkts_256_511 +=
 4390             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
 4391         stats->rx_pkts_512_1023 +=
 4392             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
 4393         stats->rx_pkts_1024_1518 +=
 4394             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
 4395         stats->rx_pkts_1519_max +=
 4396             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
 4397         stats->rx_pkts_too_long +=
 4398             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
 4399         stats->rx_pkts_jabbers +=
 4400             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
 4401         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
 4402         stats->rx_fifo_oflows +=
 4403             MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
 4404         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
 4405 
 4406         /* Tx stats. */
 4407         stats->tx_ucast_frames +=
 4408             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
 4409         stats->tx_bcast_frames +=
 4410             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
 4411         stats->tx_pause_frames +=
 4412             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
 4413         stats->tx_mcast_frames +=
 4414             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
 4415         stats->tx_octets +=
 4416             MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
 4417         stats->tx_pkts_64 +=
 4418             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
 4419         stats->tx_pkts_65_127 +=
 4420             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
 4421         stats->tx_pkts_128_255 +=
 4422             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
 4423         stats->tx_pkts_256_511 +=
 4424             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
 4425         stats->tx_pkts_512_1023 +=
 4426             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
 4427         stats->tx_pkts_1024_1518 +=
 4428             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
 4429         stats->tx_pkts_1519_max +=
 4430             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
 4431         reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
 4432         stats->tx_colls +=
 4433             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
 4434         stats->tx_late_colls +=
 4435             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
 4436         stats->tx_excess_colls +=
 4437             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
 4438         stats->tx_multi_colls +=
 4439             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
 4440         stats->tx_single_colls +=
 4441             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
 4442         stats->tx_underflows +=
 4443             MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
 4444         /* Clear MIB Clear Counter Mode. */
 4445         gmac &= ~GM_PAR_MIB_CLR;
 4446         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 4447 }
 4448 
 4449 static int
 4450 msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
 4451 {
 4452         struct msk_softc *sc;
 4453         struct msk_if_softc *sc_if;
 4454         uint32_t result, *stat;
 4455         int off;
 4456 
 4457         sc_if = (struct msk_if_softc *)arg1;
 4458         sc = sc_if->msk_softc;
 4459         off = arg2;
 4460         stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
 4461 
 4462         MSK_IF_LOCK(sc_if);
 4463         result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
 4464         result += *stat;
 4465         MSK_IF_UNLOCK(sc_if);
 4466 
 4467         return (sysctl_handle_int(oidp, &result, 0, req));
 4468 }
 4469 
 4470 static int
 4471 msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
 4472 {
 4473         struct msk_softc *sc;
 4474         struct msk_if_softc *sc_if;
 4475         uint64_t result, *stat;
 4476         int off;
 4477 
 4478         sc_if = (struct msk_if_softc *)arg1;
 4479         sc = sc_if->msk_softc;
 4480         off = arg2;
 4481         stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
 4482 
 4483         MSK_IF_LOCK(sc_if);
 4484         result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
 4485         result += *stat;
 4486         MSK_IF_UNLOCK(sc_if);
 4487 
 4488         return (sysctl_handle_64(oidp, &result, 0, req));
 4489 }
 4490 
 4491 #undef MSK_READ_MIB32
 4492 #undef MSK_READ_MIB64
 4493 
 4494 #define MSK_SYSCTL_STAT32(sc, c, o, p, n, d)                            \
 4495         SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,   \
 4496             sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,    \
 4497             "IU", d)
 4498 #define MSK_SYSCTL_STAT64(sc, c, o, p, n, d)                            \
 4499         SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_U64 | CTLFLAG_RD,    \
 4500             sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,    \
 4501             "QU", d)
 4502 
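      /*
       * Attach the per-port statistics sysctl tree.  A read-only "stats" node
       * with "rx" and "tx" children is created under the interface device's
       * sysctl tree (typically dev.msk.<unit>.stats.*); each leaf invokes
       * msk_sysctl_stat32() or msk_sysctl_stat64() to return the accumulated
       * software counter plus the current hardware MIB value.
       */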
 4503 static void
 4504 msk_sysctl_node(struct msk_if_softc *sc_if)
 4505 {
 4506         struct sysctl_ctx_list *ctx;
 4507         struct sysctl_oid_list *child, *schild;
 4508         struct sysctl_oid *tree;
 4509 
 4510         ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
 4511         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
 4512 
 4513         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
 4514             NULL, "MSK Statistics");
 4515         schild = child = SYSCTL_CHILDREN(tree);
 4516         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
 4517             NULL, "MSK RX Statistics");
 4518         child = SYSCTL_CHILDREN(tree);
 4519         MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
 4520             child, rx_ucast_frames, "Good unicast frames");
 4521         MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
 4522             child, rx_bcast_frames, "Good broadcast frames");
 4523         MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
 4524             child, rx_pause_frames, "Pause frames");
 4525         MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
 4526             child, rx_mcast_frames, "Multicast frames");
 4527         MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
 4528             child, rx_crc_errs, "CRC errors");
 4529         MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
 4530             child, rx_good_octets, "Good octets");
 4531         MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
 4532             child, rx_bad_octets, "Bad octets");
 4533         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
 4534             child, rx_pkts_64, "64 bytes frames");
 4535         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
 4536             child, rx_pkts_65_127, "65 to 127 bytes frames");
 4537         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
 4538             child, rx_pkts_128_255, "128 to 255 bytes frames");
 4539         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
 4540             child, rx_pkts_256_511, "256 to 511 bytes frames");
 4541         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
 4542             child, rx_pkts_512_1023, "512 to 1023 bytes frames");
 4543         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
 4544             child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
 4545         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
 4546             child, rx_pkts_1519_max, "1519 to max frames");
 4547         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
 4548             child, rx_pkts_too_long, "frames too long");
 4549         MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
 4550             child, rx_pkts_jabbers, "Jabber errors");
 4551         MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
 4552             child, rx_fifo_oflows, "FIFO overflows");
 4553 
 4554         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
 4555             NULL, "MSK TX Statistics");
 4556         child = SYSCTL_CHILDREN(tree);
 4557         MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
 4558             child, tx_ucast_frames, "Unicast frames");
 4559         MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
 4560             child, tx_bcast_frames, "Broadcast frames");
 4561         MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
 4562             child, tx_pause_frames, "Pause frames");
 4563         MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
 4564             child, tx_mcast_frames, "Multicast frames");
 4565         MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
 4566             child, tx_octets, "Octets");
 4567         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
 4568             child, tx_pkts_64, "64 bytes frames");
 4569         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
 4570             child, tx_pkts_65_127, "65 to 127 bytes frames");
 4571         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
 4572             child, tx_pkts_128_255, "128 to 255 bytes frames");
 4573         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
 4574             child, tx_pkts_256_511, "256 to 511 bytes frames");
 4575         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
 4576             child, tx_pkts_512_1023, "512 to 1023 bytes frames");
 4577         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
 4578             child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
 4579         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
 4580             child, tx_pkts_1519_max, "1519 to max frames");
 4581         MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
 4582             child, tx_colls, "Collisions");
 4583         MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
 4584             child, tx_late_colls, "Late collisions");
 4585         MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
 4586             child, tx_excess_colls, "Excessive collisions");
 4587         MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
 4588             child, tx_multi_colls, "Multiple collisions");
 4589         MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
 4590             child, tx_single_colls, "Single collisions");
 4591         MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
 4592             child, tx_underflows, "FIFO underflows");
 4593 }
 4594 
 4595 #undef MSK_SYSCTL_STAT32
 4596 #undef MSK_SYSCTL_STAT64
 4597 
 4598 static int
 4599 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 4600 {
 4601         int error, value;
 4602 
 4603         if (!arg1)
 4604                 return (EINVAL);
 4605         value = *(int *)arg1;
 4606         error = sysctl_handle_int(oidp, &value, 0, req);
 4607         if (error || !req->newptr)
 4608                 return (error);
 4609         if (value < low || value > high)
 4610                 return (EINVAL);
 4611         *(int *)arg1 = value;
 4612 
 4613         return (0);
 4614 }
 4615 
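      /*
       * Sysctl handler that clamps the per-interrupt event processing limit
       * (sc->msk_process_limit, consulted in msk_handle_events()) to the
       * range [MSK_PROC_MIN, MSK_PROC_MAX].
       */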
 4616 static int
 4617 sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
 4618 {
 4619 
 4620         return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
 4621             MSK_PROC_MAX));
 4622 }
