FreeBSD/Linux Kernel Cross Reference
sys/dev/msk/if_msk.c


/******************************************************************************
 *
 * Name   : sky2.c
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 *      LICENSE:
 *      Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 *      The computer program files contained in this folder ("Files")
 *      are provided to you under the BSD-type license terms provided
 *      below, and any use of such Files and any derivative works
 *      thereof created by you shall be governed by the following terms
 *      and conditions:
 *
 *      - Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials provided
 *        with the distribution.
 *      - Neither the name of Marvell nor the names of its contributors
 *        may be used to endorse or promote products derived from this
 *        software without specific prior written permission.
 *
 *      THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *      "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *      LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *      FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *      COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *      INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *      BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
 *      LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *      HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 *      STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *      ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 *      OF THE POSSIBILITY OF SUCH DAMAGE.
 *      /LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/msk/if_mskreg.h>

MODULE_DEPEND(msk, pci, 1, 1, 1);
MODULE_DEPEND(msk, ether, 1, 1, 1);
MODULE_DEPEND(msk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
static int legacy_intr = 0;
TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
static int jumbo_disable = 0;
TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
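/*
 * These tunables are read from the kernel environment at module load
 * time, so they are normally set in loader.conf. An illustrative
 * example (not part of the original file):
 *
 *     hw.msk.msi_disable=1        # use legacy INTx instead of MSI
 *     hw.msk.jumbo_disable=1      # do not allocate jumbo frame buffers
 */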

#define MSK_CSUM_FEATURES       (CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static struct msk_product {
        uint16_t        msk_vendorid;
        uint16_t        msk_deviceid;
        const char      *msk_name;
} msk_products[] = {
        { VENDORID_SK, DEVICEID_SK_YUKON2,
            "SK-9Sxx Gigabit Ethernet" },
        { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
            "SK-9Exx Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
            "Marvell Yukon 88E8021CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
            "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
            "Marvell Yukon 88E8022CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
            "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
            "Marvell Yukon 88E8061CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
            "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
            "Marvell Yukon 88E8062CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
            "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8035,
            "Marvell Yukon 88E8035 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8036,
            "Marvell Yukon 88E8036 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8038,
            "Marvell Yukon 88E8038 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8039,
            "Marvell Yukon 88E8039 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8040,
            "Marvell Yukon 88E8040 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8040T,
            "Marvell Yukon 88E8040T Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8042,
            "Marvell Yukon 88E8042 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8048,
            "Marvell Yukon 88E8048 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4361,
            "Marvell Yukon 88E8050 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4360,
            "Marvell Yukon 88E8052 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4362,
            "Marvell Yukon 88E8053 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4363,
            "Marvell Yukon 88E8055 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4364,
            "Marvell Yukon 88E8056 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4365,
            "Marvell Yukon 88E8070 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436A,
            "Marvell Yukon 88E8058 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436B,
            "Marvell Yukon 88E8071 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436C,
            "Marvell Yukon 88E8072 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436D,
            "Marvell Yukon 88E8055 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4370,
            "Marvell Yukon 88E8075 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4380,
            "Marvell Yukon 88E8057 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4381,
            "Marvell Yukon 88E8059 Gigabit Ethernet" },
        { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
            "D-Link 550SX Gigabit Ethernet" },
        { VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
            "D-Link 560SX Gigabit Ethernet" },
        { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
            "D-Link 560T Gigabit Ethernet" }
};

static const char *model_name[] = {
        "Yukon XL",
        "Yukon EC Ultra",
        "Yukon EX",
        "Yukon EC",
        "Yukon FE",
        "Yukon FE+",
        "Yukon Supreme",
        "Yukon Ultra 2",
        "Yukon Unknown",
        "Yukon Optima",
};

static int mskc_probe(device_t);
static int mskc_attach(device_t);
static int mskc_detach(device_t);
static int mskc_shutdown(device_t);
static int mskc_setup_rambuffer(struct msk_softc *);
static int mskc_suspend(device_t);
static int mskc_resume(device_t);
static void mskc_reset(struct msk_softc *);

static int msk_probe(device_t);
static int msk_attach(device_t);
static int msk_detach(device_t);

static void msk_tick(void *);
static void msk_intr(void *);
static void msk_intr_phy(struct msk_if_softc *);
static void msk_intr_gmac(struct msk_if_softc *);
static __inline void msk_rxput(struct msk_if_softc *);
static int msk_handle_events(struct msk_softc *);
static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void msk_intr_hwerr(struct msk_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void msk_fixup_rx(struct mbuf *);
#endif
static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_txeof(struct msk_if_softc *, int);
static int msk_encap(struct msk_if_softc *, struct mbuf **);
static void msk_start(struct ifnet *);
static void msk_start_locked(struct ifnet *);
static int msk_ioctl(struct ifnet *, u_long, caddr_t);
static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void msk_set_rambuffer(struct msk_if_softc *);
static void msk_set_tx_stfwd(struct msk_if_softc *);
static void msk_init(void *);
static void msk_init_locked(struct msk_if_softc *);
static void msk_stop(struct msk_if_softc *);
static void msk_watchdog(struct msk_if_softc *);
static int msk_mediachange(struct ifnet *);
static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
static void msk_phy_power(struct msk_softc *, int);
static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int msk_status_dma_alloc(struct msk_softc *);
static void msk_status_dma_free(struct msk_softc *);
static int msk_txrx_dma_alloc(struct msk_if_softc *);
static int msk_rx_dma_jalloc(struct msk_if_softc *);
static void msk_txrx_dma_free(struct msk_if_softc *);
static void msk_rx_dma_jfree(struct msk_if_softc *);
static int msk_rx_fill(struct msk_if_softc *, int);
static int msk_init_rx_ring(struct msk_if_softc *);
static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
static void msk_init_tx_ring(struct msk_if_softc *);
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int msk_newbuf(struct msk_if_softc *, int);
static int msk_jumbo_newbuf(struct msk_if_softc *, int);

static int msk_phy_readreg(struct msk_if_softc *, int, int);
static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
static int msk_miibus_readreg(device_t, int, int);
static int msk_miibus_writereg(device_t, int, int, int);
static void msk_miibus_statchg(device_t);

static void msk_rxfilter(struct msk_if_softc *);
static void msk_setvlan(struct msk_if_softc *, struct ifnet *);

static void msk_stats_clear(struct msk_if_softc *);
static void msk_stats_update(struct msk_if_softc *);
static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
static void msk_sysctl_node(struct msk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);

static device_method_t mskc_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         mskc_probe),
        DEVMETHOD(device_attach,        mskc_attach),
        DEVMETHOD(device_detach,        mskc_detach),
        DEVMETHOD(device_suspend,       mskc_suspend),
        DEVMETHOD(device_resume,        mskc_resume),
        DEVMETHOD(device_shutdown,      mskc_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        { NULL, NULL }
};

static driver_t mskc_driver = {
        "mskc",
        mskc_methods,
        sizeof(struct msk_softc)
};

static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         msk_probe),
        DEVMETHOD(device_attach,        msk_attach),
        DEVMETHOD(device_detach,        msk_detach),
        DEVMETHOD(device_shutdown,      bus_generic_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       msk_miibus_readreg),
        DEVMETHOD(miibus_writereg,      msk_miibus_writereg),
        DEVMETHOD(miibus_statchg,       msk_miibus_statchg),

        { NULL, NULL }
};

static driver_t msk_driver = {
        "msk",
        msk_methods,
        sizeof(struct msk_if_softc)
};

static devclass_t msk_devclass;

DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
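/*
 * The driver attaches in two layers: the mskc controller driver binds
 * to the PCI device and carries the shared state (struct msk_softc), a
 * msk child device is added for each port (struct msk_if_softc), and
 * every msk instance in turn hosts a miibus for its PHY.
 */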

static struct resource_spec msk_res_spec_io[] = {
        { SYS_RES_IOPORT,       PCIR_BAR(1),    RF_ACTIVE },
        { -1,                   0,              0 }
};

static struct resource_spec msk_res_spec_mem[] = {
        { SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
        { -1,                   0,              0 }
};

static struct resource_spec msk_irq_spec_legacy[] = {
        { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
        { -1,                   0,              0 }
};

static struct resource_spec msk_irq_spec_msi[] = {
        { SYS_RES_IRQ,          1,              RF_ACTIVE },
        { -1,                   0,              0 }
};

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
        struct msk_if_softc *sc_if;

        sc_if = device_get_softc(dev);

        return (msk_phy_readreg(sc_if, phy, reg));
}

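/*
 * Read a PHY register through the GMAC's SMI interface: issue a read
 * command for (phy, reg), then poll GM_SMI_CTRL with 1us delays, for up
 * to MSK_TIMEOUT iterations, until GM_SMI_CT_RD_VAL reports that
 * GM_SMI_DATA holds valid data.
 */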
static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
        struct msk_softc *sc;
        int i, val;

        sc = sc_if->msk_softc;

        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
            GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

        for (i = 0; i < MSK_TIMEOUT; i++) {
                DELAY(1);
                val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
                if ((val & GM_SMI_CT_RD_VAL) != 0) {
                        val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
                        break;
                }
        }

        if (i == MSK_TIMEOUT) {
                if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
                val = 0;
        }

        return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct msk_if_softc *sc_if;

        sc_if = device_get_softc(dev);

        return (msk_phy_writereg(sc_if, phy, reg, val));
}

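/*
 * Write a PHY register through the SMI interface: load GM_SMI_DATA
 * first, then issue the write command and poll until GM_SMI_CT_BUSY
 * clears. Note that the function returns 0 even if the write times out.
 */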
static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
        struct msk_softc *sc;
        int i;

        sc = sc_if->msk_softc;

        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
            GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
        for (i = 0; i < MSK_TIMEOUT; i++) {
                DELAY(1);
                if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
                    GM_SMI_CT_BUSY) == 0)
                        break;
        }
        if (i == MSK_TIMEOUT)
                if_printf(sc_if->msk_ifp, "phy write timeout\n");

        return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
        struct msk_softc *sc;
        struct msk_if_softc *sc_if;
        struct mii_data *mii;
        struct ifnet *ifp;
        uint32_t gmac;

        sc_if = device_get_softc(dev);
        sc = sc_if->msk_softc;

        MSK_IF_LOCK_ASSERT(sc_if);

        mii = device_get_softc(sc_if->msk_miibus);
        ifp = sc_if->msk_ifp;
        if (mii == NULL || ifp == NULL ||
            (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;

        sc_if->msk_flags &= ~MSK_FLAG_LINK;
        if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
            (IFM_AVALID | IFM_ACTIVE)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc_if->msk_flags |= MSK_FLAG_LINK;
                        break;
                case IFM_1000_T:
                case IFM_1000_SX:
                case IFM_1000_LX:
                case IFM_1000_CX:
                        if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
                                sc_if->msk_flags |= MSK_FLAG_LINK;
                        break;
                default:
                        break;
                }
        }

        if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
                /* Enable Tx FIFO underrun and Rx FIFO overrun interrupts. */
                CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
                    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
                /*
                 * Since mii(4) notifies msk(4) whenever it detects a
                 * link state change, there is no need to enable the
                 * automatic speed/flow-control/duplex updates.
                 */
                gmac = GM_GPCR_AU_ALL_DIS;
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_1000_SX:
                case IFM_1000_T:
                        gmac |= GM_GPCR_SPEED_1000;
                        break;
                case IFM_100_TX:
                        gmac |= GM_GPCR_SPEED_100;
                        break;
                case IFM_10_T:
                        break;
                }

                if ((IFM_OPTIONS(mii->mii_media_active) &
                    IFM_ETH_RXPAUSE) == 0)
                        gmac |= GM_GPCR_FC_RX_DIS;
                if ((IFM_OPTIONS(mii->mii_media_active) &
                    IFM_ETH_TXPAUSE) == 0)
                        gmac |= GM_GPCR_FC_TX_DIS;
                if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
                        gmac |= GM_GPCR_DUP_FULL;
                else
                        gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
                gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
                GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
                /* Read back to make sure the write reached the hardware. */
                GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
                gmac = GMC_PAUSE_OFF;
                if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
                        if ((IFM_OPTIONS(mii->mii_media_active) &
                            IFM_ETH_RXPAUSE) != 0)
                                gmac = GMC_PAUSE_ON;
                }
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

                /* Enable PHY interrupt for FIFO underrun/overflow. */
                msk_phy_writereg(sc_if, PHY_ADDR_MARV,
                    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
        } else {
                /*
                 * Link state changed to down.
                 * Disable PHY interrupts.
                 */
                msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
                /* Disable Rx/Tx MAC. */
                gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
                if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
                        gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
                        GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
                        /* Read back to make sure the write reached the hardware. */
                        GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
                }
        }
}

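/*
 * Program the GMAC Rx filter. Promiscuous mode disables both the
 * unicast and the multicast filter; IFF_ALLMULTI opens up the multicast
 * hash completely; otherwise a 64-bit hash table is built from the
 * big-endian CRC of each multicast address (the low 6 bits of the CRC
 * select the bit) and loaded into the four 16-bit GM_MC_ADDR_H*
 * registers.
 */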
static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
        struct msk_softc *sc;
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t mchash[2];
        uint32_t crc;
        uint16_t mode;

        sc = sc_if->msk_softc;

        MSK_IF_LOCK_ASSERT(sc_if);

        ifp = sc_if->msk_ifp;

        bzero(mchash, sizeof(mchash));
        mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
        if ((ifp->if_flags & IFF_PROMISC) != 0)
                mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
        else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
                mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
                /* Accept all multicast: set every bit of the 64-bit hash. */
                mchash[0] = 0xffffffff;
                mchash[1] = 0xffffffff;
        } else {
                mode |= GM_RXCR_UCF_ENA;
                IF_ADDR_LOCK(ifp);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
                            ifma->ifma_addr), ETHER_ADDR_LEN);
                        /* Just want the 6 least significant bits. */
                        crc &= 0x3f;
                        /* Set the corresponding bit in the hash table. */
                        mchash[crc >> 5] |= 1 << (crc & 0x1f);
                }
                IF_ADDR_UNLOCK(ifp);
                if (mchash[0] != 0 || mchash[1] != 0)
                        mode |= GM_RXCR_MCF_ENA;
        }

        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
            mchash[0] & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
            (mchash[0] >> 16) & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
            mchash[1] & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
            (mchash[1] >> 16) & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
        struct msk_softc *sc;

        sc = sc_if->msk_softc;
        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
                    RX_VLAN_STRIP_ON);
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
                    TX_VLAN_TAG_ON);
        } else {
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
                    RX_VLAN_STRIP_OFF);
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
                    TX_VLAN_TAG_OFF);
        }
}

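/*
 * Complete Rx ring setup when the old-style Rx checksum offload is in
 * use: wait for the prefetch unit to fetch the OP_TCPSTART element(s)
 * queued by the ring initialization code, then hand the consumed slot a
 * real buffer and push the updated producer index to the hardware.
 */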
static int
msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
{
        uint16_t idx;
        int i;

        if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
            (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
                /* Wait until controller executes OP_TCPSTART command. */
                for (i = 100; i > 0; i--) {
                        DELAY(100);
                        idx = CSR_READ_2(sc_if->msk_softc,
                            Y2_PREF_Q_ADDR(sc_if->msk_rxq,
                            PREF_UNIT_GET_IDX_REG));
                        if (idx != 0)
                                break;
                }
                if (i == 0) {
                        device_printf(sc_if->msk_if_dev,
                            "prefetch unit stuck?\n");
                        return (ETIMEDOUT);
                }
                /*
                 * Refill the consumed list element with a fresh buffer.
                 * This could be done in the Rx handler, but we don't
                 * want to add special-case code to that fast path.
                 */
                if (jumbo > 0) {
                        if (msk_jumbo_newbuf(sc_if, 0) != 0)
                                return (ENOBUFS);
                        bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
                            sc_if->msk_cdata.msk_jumbo_rx_ring_map,
                            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                } else {
                        if (msk_newbuf(sc_if, 0) != 0)
                                return (ENOBUFS);
                        bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
                            sc_if->msk_cdata.msk_rx_ring_map,
                            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                }
                sc_if->msk_cdata.msk_rx_prod = 0;
                CSR_WRITE_2(sc_if->msk_softc,
                    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
                    sc_if->msk_cdata.msk_rx_prod);
        }
        return (0);
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
        struct msk_ring_data *rd;
        struct msk_rxdesc *rxd;
        int i, nbuf, prod;

        MSK_IF_LOCK_ASSERT(sc_if);

        sc_if->msk_cdata.msk_rx_cons = 0;
        sc_if->msk_cdata.msk_rx_prod = 0;
        sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

        rd = &sc_if->msk_rdata;
        bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
        for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
                rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_rx_ring[prod];
                MSK_INC(prod, MSK_RX_RING_CNT);
        }
        nbuf = MSK_RX_BUF_CNT;
        prod = 0;
        /* Tell the controller how to compute the Rx checksum. */
        if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
            (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
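                /*
                 * With MSK_64BIT_DMA each buffer occupies two list
                 * elements (an OP_ADDR64 element followed by the packet
                 * element, see msk_newbuf()), so the checksum-start
                 * element below is queued twice, presumably to keep
                 * that two-slot spacing in the ring.
                 */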
#ifdef MSK_64BIT_DMA
                rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_rx_ring[prod];
                rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
                    ETHER_HDR_LEN);
                rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
                MSK_INC(prod, MSK_RX_RING_CNT);
                MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
#endif
                rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_rx_ring[prod];
                rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
                    ETHER_HDR_LEN);
                rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
                MSK_INC(prod, MSK_RX_RING_CNT);
                MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
                nbuf--;
        }
        for (i = 0; i < nbuf; i++) {
                if (msk_newbuf(sc_if, prod) != 0)
                        return (ENOBUFS);
                MSK_RX_INC(prod, MSK_RX_RING_CNT);
        }

        bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
            sc_if->msk_cdata.msk_rx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Update prefetch unit. */
        sc_if->msk_cdata.msk_rx_prod = prod;
        CSR_WRITE_2(sc_if->msk_softc,
            Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
            (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) %
            MSK_RX_RING_CNT);
        if (msk_rx_fill(sc_if, 0) != 0)
                return (ENOBUFS);
        return (0);
}

static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
        struct msk_ring_data *rd;
        struct msk_rxdesc *rxd;
        int i, nbuf, prod;

        MSK_IF_LOCK_ASSERT(sc_if);

        sc_if->msk_cdata.msk_rx_cons = 0;
        sc_if->msk_cdata.msk_rx_prod = 0;
        sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

        rd = &sc_if->msk_rdata;
        bzero(rd->msk_jumbo_rx_ring,
            sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
        for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
                rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
                MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
        }
        nbuf = MSK_RX_BUF_CNT;
        prod = 0;
        /* Tell the controller how to compute the Rx checksum. */
        if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
            (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
#ifdef MSK_64BIT_DMA
                rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
                rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
                    ETHER_HDR_LEN);
                rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
                MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
                MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
#endif
                rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
                rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
                    ETHER_HDR_LEN);
                rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
                MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
                MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
                nbuf--;
        }
        for (i = 0; i < nbuf; i++) {
                if (msk_jumbo_newbuf(sc_if, prod) != 0)
                        return (ENOBUFS);
                MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT);
        }

        bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
            sc_if->msk_cdata.msk_jumbo_rx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Update prefetch unit. */
        sc_if->msk_cdata.msk_rx_prod = prod;
        CSR_WRITE_2(sc_if->msk_softc,
            Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
            (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) %
            MSK_JUMBO_RX_RING_CNT);
        if (msk_rx_fill(sc_if, 1) != 0)
                return (ENOBUFS);
        return (0);
}

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
        struct msk_ring_data *rd;
        struct msk_txdesc *txd;
        int i;

        sc_if->msk_cdata.msk_tso_mtu = 0;
        sc_if->msk_cdata.msk_last_csum = 0;
        sc_if->msk_cdata.msk_tx_prod = 0;
        sc_if->msk_cdata.msk_tx_cons = 0;
        sc_if->msk_cdata.msk_tx_cnt = 0;
        sc_if->msk_cdata.msk_tx_high_addr = 0;

        rd = &sc_if->msk_rdata;
        bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
        for (i = 0; i < MSK_TX_RING_CNT; i++) {
                txd = &sc_if->msk_cdata.msk_txdesc[i];
                txd->tx_m = NULL;
                txd->tx_le = &rd->msk_tx_ring[i];
        }

        bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
            sc_if->msk_cdata.msk_tx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

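/*
 * Reuse the mbuf that is already attached to the given ring slot by
 * re-arming its list element with HW_OWNER; intended for the case where
 * a replacement buffer cannot be allocated in the Rx path.
 */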
static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;

#ifdef MSK_64BIT_DMA
        rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
        rx_le = rxd->rx_le;
        rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
        MSK_INC(idx, MSK_RX_RING_CNT);
#endif
        rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
        m = rxd->rx_m;
        rx_le = rxd->rx_le;
        rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;

#ifdef MSK_64BIT_DMA
        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
        rx_le = rxd->rx_le;
        rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
        MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
#endif
        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
        m = rxd->rx_m;
        rx_le = rxd->rx_le;
        rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

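/*
 * Attach a fresh mbuf cluster to ring slot idx. The cluster is loaded
 * into the spare DMA map first; only after a successful load are the
 * slot's map and the spare map swapped, so a failed allocation leaves
 * the previously loaded buffer intact.
 */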
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        bus_dmamap_t map;
        int nsegs;

        m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);

        m->m_len = m->m_pkthdr.len = MCLBYTES;
        if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
                m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
        else
                m_adj(m, MSK_RX_BUF_ALIGN);
#endif

        if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
            sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
            BUS_DMA_NOWAIT) != 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#ifdef MSK_64BIT_DMA
        rx_le = rxd->rx_le;
        rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
        rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
        MSK_INC(idx, MSK_RX_RING_CNT);
        rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#endif
        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
                rxd->rx_m = NULL;
        }
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
        sc_if->msk_cdata.msk_rx_sparemap = map;
        bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_PREREAD);
        rxd->rx_m = m;
        rx_le = rxd->rx_le;
        rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
        rx_le->msk_control =
            htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

        return (0);
}

static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        bus_dmamap_t map;
        int nsegs;

        m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
        if (m == NULL)
                return (ENOBUFS);
        if ((m->m_flags & M_EXT) == 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        m->m_len = m->m_pkthdr.len = MJUM9BYTES;
        if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
                m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
        else
                m_adj(m, MSK_RX_BUF_ALIGN);
#endif

        if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
            sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
            BUS_DMA_NOWAIT) != 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
#ifdef MSK_64BIT_DMA
        rx_le = rxd->rx_le;
        rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
        rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
        MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
#endif
        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
                    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
                    rxd->rx_dmamap);
                rxd->rx_m = NULL;
        }
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
        sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
        bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_PREREAD);
        rxd->rx_m = m;
        rx_le = rxd->rx_le;
        rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
        rx_le->msk_control =
            htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

        return (0);
}

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
        struct msk_if_softc *sc_if;
        struct mii_data *mii;
        int error;

        sc_if = ifp->if_softc;

        MSK_IF_LOCK(sc_if);
        mii = device_get_softc(sc_if->msk_miibus);
        error = mii_mediachg(mii);
        MSK_IF_UNLOCK(sc_if);

        return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct msk_if_softc *sc_if;
        struct mii_data *mii;

        sc_if = ifp->if_softc;
        MSK_IF_LOCK(sc_if);
        if ((ifp->if_flags & IFF_UP) == 0) {
                MSK_IF_UNLOCK(sc_if);
                return;
        }
        mii = device_get_softc(sc_if->msk_miibus);

        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
        MSK_IF_UNLOCK(sc_if);
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct msk_if_softc *sc_if;
        struct ifreq *ifr;
        struct mii_data *mii;
        int error, mask, reinit;

        sc_if = ifp->if_softc;
        ifr = (struct ifreq *)data;
        error = 0;

        switch (command) {
        case SIOCSIFMTU:
                MSK_IF_LOCK(sc_if);
                if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
                        error = EINVAL;
                else if (ifp->if_mtu != ifr->ifr_mtu) {
                        if (ifr->ifr_mtu > ETHERMTU) {
                                if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
                                        error = EINVAL;
                                        MSK_IF_UNLOCK(sc_if);
                                        break;
                                }
                                if ((sc_if->msk_flags &
                                    MSK_FLAG_JUMBO_NOCSUM) != 0) {
                                        ifp->if_hwassist &=
                                            ~(MSK_CSUM_FEATURES | CSUM_TSO);
                                        ifp->if_capenable &=
                                            ~(IFCAP_TSO4 | IFCAP_TXCSUM);
                                        VLAN_CAPABILITIES(ifp);
                                }
                        }
                        ifp->if_mtu = ifr->ifr_mtu;
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                                msk_init_locked(sc_if);
                        }
                }
                MSK_IF_UNLOCK(sc_if);
                break;
        case SIOCSIFFLAGS:
                MSK_IF_LOCK(sc_if);
                if ((ifp->if_flags & IFF_UP) != 0) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                            ((ifp->if_flags ^ sc_if->msk_if_flags) &
                            (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                                msk_rxfilter(sc_if);
                        else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
                                msk_init_locked(sc_if);
                } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                        msk_stop(sc_if);
                sc_if->msk_if_flags = ifp->if_flags;
                MSK_IF_UNLOCK(sc_if);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                MSK_IF_LOCK(sc_if);
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                        msk_rxfilter(sc_if);
                MSK_IF_UNLOCK(sc_if);
                break;
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                mii = device_get_softc(sc_if->msk_miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
                break;
        case SIOCSIFCAP:
                reinit = 0;
                MSK_IF_LOCK(sc_if);
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if ((mask & IFCAP_TXCSUM) != 0 &&
                    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
                                ifp->if_hwassist |= MSK_CSUM_FEATURES;
                        else
                                ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
                }
                if ((mask & IFCAP_RXCSUM) != 0 &&
                    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                        if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
                                reinit = 1;
                }
                if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
                    (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
                        ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
                if ((mask & IFCAP_TSO4) != 0 &&
                    (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
                        ifp->if_capenable ^= IFCAP_TSO4;
                        if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
                                ifp->if_hwassist |= CSUM_TSO;
                        else
                                ifp->if_hwassist &= ~CSUM_TSO;
                }
                if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
                    (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
                    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
                                ifp->if_capenable &=
                                    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
                        msk_setvlan(sc_if, ifp);
                }
                if (ifp->if_mtu > ETHERMTU &&
                    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
                        ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
                        ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
                }
                VLAN_CAPABILITIES(ifp);
                if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                        msk_init_locked(sc_if);
                }
                MSK_IF_UNLOCK(sc_if);
                break;
        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

static int
mskc_probe(device_t dev)
{
        struct msk_product *mp;
        uint16_t vendor, devid;
        int i;

        vendor = pci_get_vendor(dev);
        devid = pci_get_device(dev);
        mp = msk_products;
        for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
            i++, mp++) {
                if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
                        device_set_desc(dev, mp->msk_name);
                        return (BUS_PROBE_DEFAULT);
                }
        }

        return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
        int next;
        int i;

        /* Get adapter SRAM size. */
        sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
        if (bootverbose)
                device_printf(sc->msk_dev,
                    "RAM buffer size : %dKB\n", sc->msk_ramsize);
        if (sc->msk_ramsize == 0)
                return (0);

        sc->msk_pflags |= MSK_FLAG_RAMBUF;
        /*
         * Give the receiver 2/3 of the memory, rounded down to a
         * multiple of 1024. The Tx/Rx RAM buffer sizes of the Yukon II
         * should be multiples of 1024.
         */
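        /*
         * Illustrative arithmetic (not part of the original comment):
         * with a 48KB RAM buffer, msk_rxqsize = rounddown(48 * 1024 *
         * 2 / 3, 1024) = 32768 bytes, leaving msk_txqsize = 49152 -
         * 32768 = 16384 bytes.
         */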
        sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
        sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
        for (i = 0, next = 0; i < sc->msk_num_port; i++) {
                sc->msk_rxqstart[i] = next;
                sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
                next = sc->msk_rxqend[i] + 1;
                sc->msk_txqstart[i] = next;
                sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
                next = sc->msk_txqend[i] + 1;
                if (bootverbose) {
                        device_printf(sc->msk_dev,
                            "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
                            sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
                            sc->msk_rxqend[i]);
                        device_printf(sc->msk_dev,
                            "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
                            sc->msk_txqsize / 1024, sc->msk_txqstart[i],
                            sc->msk_txqend[i]);
                }
        }

        return (0);
}

static void
msk_phy_power(struct msk_softc *sc, int mode)
{
        uint32_t our, val;
        int i;

        switch (mode) {
        case MSK_PHY_POWERUP:
                /* Switch power to VCC (WA for VAUX problem). */
                CSR_WRITE_1(sc, B0_POWER_CTRL,
                    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
                /* Disable Core Clock Division, set Clock Select to 0. */
                CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

                val = 0;
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
                    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                        /* Enable bits are inverted. */
                        val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
                              Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
                              Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
                }
                /*
                 * Enable PCI & Core Clock, enable clock gating for both Links.
                 */
                CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

                our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
                our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
                        if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                                /* Deassert Low Power for 1st PHY. */
                                our |= PCI_Y2_PHY1_COMA;
                                if (sc->msk_num_port > 1)
                                        our |= PCI_Y2_PHY2_COMA;
                        }
                }
                if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
                    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
                    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
                        val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
                        val &= (PCI_FORCE_ASPM_REQUEST |
                            PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
                            PCI_ASPM_CLKRUN_REQUEST);
                        /* Set all bits to 0 except bits 15..12. */
                        CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
                        val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
                        val &= PCI_CTL_TIM_VMAIN_AV_MSK;
                        CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
                        CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
                        CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
                        /*
                         * Disable status race, workaround for
                         * Yukon EC Ultra & Yukon EX.
                         */
                        val = CSR_READ_4(sc, B2_GP_IO);
                        val |= GLB_GPIO_STAT_RACE_DIS;
                        CSR_WRITE_4(sc, B2_GP_IO, val);
                        CSR_READ_4(sc, B2_GP_IO);
                }
                /* Release PHY from PowerDown/COMA mode. */
                CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);

                for (i = 0; i < sc->msk_num_port; i++) {
                        CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
                            GMLC_RST_SET);
                        CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
                            GMLC_RST_CLR);
                }
                break;
        case MSK_PHY_POWERDOWN:
                val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
                val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
                    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                        val &= ~PCI_Y2_PHY1_COMA;
                        if (sc->msk_num_port > 1)
                                val &= ~PCI_Y2_PHY2_COMA;
                }
                CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
 1333 
 1334                 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 1335                       Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 1336                       Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
 1337                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1338                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1339                         /* Enable bits are inverted. */
 1340                         val = 0;
 1341                 }
 1342                 /*
 1343                  * Disable PCI & Core Clock, disable clock gating for
 1344                  * both Links.
 1345                  */
 1346                 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
 1347                 CSR_WRITE_1(sc, B0_POWER_CTRL,
 1348                     PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
 1349                 break;
 1350         default:
 1351                 break;
 1352         }
 1353 }
 1354 
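       /*
        * Bring the controller into a known state: disable ASF, clear PCI
        * and PCI Express error state, power up the PHYs, reset the MACs
        * and set up the status unit.
        */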
 1355 static void
 1356 mskc_reset(struct msk_softc *sc)
 1357 {
 1358         bus_addr_t addr;
 1359         uint16_t status;
 1360         uint32_t val;
 1361         int i, initram;
 1362 
 1363         /* Disable ASF. */
 1364         if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
 1365             sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
 1366                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1367                     sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 1368                         CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
 1369                         status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
 1370                         /* Clear AHB bridge & microcontroller reset. */
 1371                         status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
 1372                             Y2_ASF_HCU_CCSR_CPU_RST_MODE);
 1373                         /* Clear ASF microcontroller state. */
 1374                         status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
 1375                         status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
 1376                         CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
 1377                         CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
 1378                 } else
 1379                         CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
 1380                 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
 1381                 /*
 1382                  * Since we disabled ASF, S/W reset is required for
 1383                  * Power Management.
 1384                  */
 1385                 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 1386                 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1387         }
 1388 
 1389         /* Clear all error bits in the PCI status register. */
 1390         status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 1391         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1392 
 1393         pci_write_config(sc->msk_dev, PCIR_STATUS, status |
 1394             PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 1395             PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
 1396         CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
 1397 
 1398         switch (sc->msk_bustype) {
 1399         case MSK_PEX_BUS:
 1400                 /* Clear all PEX errors. */
 1401                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 1402                 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 1403                 if ((val & PEX_RX_OV) != 0) {
 1404                         sc->msk_intrmask &= ~Y2_IS_HW_ERR;
 1405                         sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 1406                 }
 1407                 break;
 1408         case MSK_PCI_BUS:
 1409         case MSK_PCIX_BUS:
  1410                 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
 1411                 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
 1412                 if (val == 0)
 1413                         pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
 1414                 if (sc->msk_bustype == MSK_PCIX_BUS) {
  1415                         /* Set the Cache Line Size optimization bit. */
 1416                         val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
 1417                         val |= PCI_CLS_OPT;
 1418                         pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
 1419                 }
 1420                 break;
 1421         }
 1422         /* Set PHY power state. */
 1423         msk_phy_power(sc, MSK_PHY_POWERUP);
 1424 
 1425         /* Reset GPHY/GMAC Control */
 1426         for (i = 0; i < sc->msk_num_port; i++) {
 1427                 /* GPHY Control reset. */
 1428                 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
 1429                 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
 1430                 /* GMAC Control reset. */
 1431                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
 1432                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
 1433                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
 1434                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1435                     sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
 1436                         CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
 1437                             GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 1438                             GMC_BYP_RETR_ON);
 1439         }
 1440 
 1441         if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
 1442             sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
 1443                 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
 1444         if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
  1445                 /* Disable PCIe PHY powerdown (reg 0x80, bit 7). */
 1446                 CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
 1447         }
 1448         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1449 
 1450         /* LED On. */
 1451         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
 1452 
 1453         /* Clear TWSI IRQ. */
 1454         CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
 1455 
 1456         /* Turn off hardware timer. */
 1457         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
 1458         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
 1459 
 1460         /* Turn off descriptor polling. */
 1461         CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
 1462 
 1463         /* Turn off time stamps. */
 1464         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
 1465         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 1466 
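               /* Only Yukon XL, EC and FE need the RAM interface setup below. */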
 1467         initram = 0;
 1468         if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
 1469             sc->msk_hw_id == CHIP_ID_YUKON_EC ||
 1470             sc->msk_hw_id == CHIP_ID_YUKON_FE)
 1471                 initram++;
 1472 
  1473         /* Reset the RAM interface and configure its timeout values. */
 1474         for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
 1475                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
 1476                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
 1477                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
 1478                     MSK_RI_TO_53);
 1479                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
 1480                     MSK_RI_TO_53);
 1481                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
 1482                     MSK_RI_TO_53);
 1483                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
 1484                     MSK_RI_TO_53);
 1485                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
 1486                     MSK_RI_TO_53);
 1487                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
 1488                     MSK_RI_TO_53);
 1489                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
 1490                     MSK_RI_TO_53);
 1491                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
 1492                     MSK_RI_TO_53);
 1493                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
 1494                     MSK_RI_TO_53);
 1495                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
 1496                     MSK_RI_TO_53);
 1497                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
 1498                     MSK_RI_TO_53);
 1499                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
 1500                     MSK_RI_TO_53);
 1501         }
 1502 
 1503         /* Disable all interrupts. */
 1504         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 1505         CSR_READ_4(sc, B0_HWE_IMSK);
 1506         CSR_WRITE_4(sc, B0_IMSK, 0);
 1507         CSR_READ_4(sc, B0_IMSK);
 1508 
 1509         /*
  1510          * On dual port PCI-X cards, there is a problem where status
  1511          * updates can be received out of order due to split transactions.
 1512          */
 1513         if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
 1514                 uint16_t pcix_cmd;
 1515 
 1516                 pcix_cmd = pci_read_config(sc->msk_dev,
 1517                     sc->msk_pcixcap + PCIXR_COMMAND, 2);
 1518                 /* Clear Max Outstanding Split Transactions. */
 1519                 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
 1520                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1521                 pci_write_config(sc->msk_dev,
 1522                     sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
 1523                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1524         }
 1525         if (sc->msk_expcap != 0) {
 1526                 /* Change Max. Read Request Size to 2048 bytes. */
 1527                 if (pci_get_max_read_req(sc->msk_dev) == 512)
 1528                         pci_set_max_read_req(sc->msk_dev, 2048);
 1529         }
 1530 
 1531         /* Clear status list. */
 1532         bzero(sc->msk_stat_ring,
 1533             sizeof(struct msk_stat_desc) * sc->msk_stat_count);
 1534         sc->msk_stat_cons = 0;
 1535         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 1536             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1537         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
 1538         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
 1539         /* Set the status list base address. */
 1540         addr = sc->msk_stat_ring_paddr;
 1541         CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
 1542         CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
 1543         /* Set the status list last index. */
 1544         CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1);
 1545         if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
 1546             sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
  1547                 /* Workaround for dev. #4.3. */
 1548                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
  1549                 /* Workaround for dev. #4.18. */
 1550                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
 1551                 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
 1552         } else {
 1553                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
 1554                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
 1555                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1556                     sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
 1557                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
 1558                 else
 1559                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
 1560                 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
 1561         }
 1562         /*
 1563          * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
 1564          */
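               /* Arm the Tx status timer with a 1000us initial value. */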
 1565         CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
 1566 
 1567         /* Enable status unit. */
 1568         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
 1569 
 1570         CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
 1571         CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
 1572         CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
 1573 }
 1574 
 1575 static int
 1576 msk_probe(device_t dev)
 1577 {
 1578         struct msk_softc *sc;
 1579         char desc[100];
 1580 
 1581         sc = device_get_softc(device_get_parent(dev));
 1582         /*
 1583          * Not much to do here. We always know there will be
 1584          * at least one GMAC present, and if there are two,
 1585          * mskc_attach() will create a second device instance
 1586          * for us.
 1587          */
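               /*
                * mskc_attach() has already rejected unknown chip IDs, so
                * msk_hw_id can safely index model_name[] relative to
                * CHIP_ID_YUKON_XL.
                */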
 1588         snprintf(desc, sizeof(desc),
 1589             "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
 1590             model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
 1591             sc->msk_hw_rev);
 1592         device_set_desc_copy(dev, desc);
 1593 
 1594         return (BUS_PROBE_DEFAULT);
 1595 }
 1596 
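       /*
        * Attach a network interface for one port: set up the ifnet and its
        * capabilities, read the station address and attach the PHY.
        */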
 1597 static int
 1598 msk_attach(device_t dev)
 1599 {
 1600         struct msk_softc *sc;
 1601         struct msk_if_softc *sc_if;
 1602         struct ifnet *ifp;
 1603         struct msk_mii_data *mmd;
 1604         int i, port, error;
 1605         uint8_t eaddr[6];
 1606 
 1607         if (dev == NULL)
 1608                 return (EINVAL);
 1609 
 1610         error = 0;
 1611         sc_if = device_get_softc(dev);
 1612         sc = device_get_softc(device_get_parent(dev));
 1613         mmd = device_get_ivars(dev);
 1614         port = mmd->port;
 1615 
 1616         sc_if->msk_if_dev = dev;
 1617         sc_if->msk_port = port;
 1618         sc_if->msk_softc = sc;
 1619         sc_if->msk_flags = sc->msk_pflags;
 1620         sc->msk_if[port] = sc_if;
 1621         /* Setup Tx/Rx queue register offsets. */
 1622         if (port == MSK_PORT_A) {
 1623                 sc_if->msk_txq = Q_XA1;
 1624                 sc_if->msk_txsq = Q_XS1;
 1625                 sc_if->msk_rxq = Q_R1;
 1626         } else {
 1627                 sc_if->msk_txq = Q_XA2;
 1628                 sc_if->msk_txsq = Q_XS2;
 1629                 sc_if->msk_rxq = Q_R2;
 1630         }
 1631 
 1632         callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
 1633         msk_sysctl_node(sc_if);
 1634 
  1635         if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
 1636                 goto fail;
 1637         msk_rx_dma_jalloc(sc_if);
 1638 
 1639         ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
 1640         if (ifp == NULL) {
 1641                 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
 1642                 error = ENOSPC;
 1643                 goto fail;
 1644         }
 1645         ifp->if_softc = sc_if;
 1646         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1647         ifp->if_mtu = ETHERMTU;
 1648         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1649         ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
 1650         /*
  1651          * Enable Rx checksum offloading for both descriptor formats,
  1652          * except on Yukon XL and on chips with broken Rx checksums.
 1653          */
 1654         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 1655             sc->msk_hw_id != CHIP_ID_YUKON_XL)
 1656                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1657         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
 1658             (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
 1659                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1660         ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
 1661         ifp->if_capenable = ifp->if_capabilities;
 1662         ifp->if_ioctl = msk_ioctl;
 1663         ifp->if_start = msk_start;
 1664         ifp->if_init = msk_init;
 1665         IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
 1666         ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
 1667         IFQ_SET_READY(&ifp->if_snd);
 1668         /*
 1669          * Get station address for this interface. Note that
 1670          * dual port cards actually come with three station
 1671          * addresses: one for each port, plus an extra. The
 1672          * extra one is used by the SysKonnect driver software
 1673          * as a 'virtual' station address for when both ports
 1674          * are operating in failover mode. Currently we don't
 1675          * use this extra address.
 1676          */
 1677         MSK_IF_LOCK(sc_if);
 1678         for (i = 0; i < ETHER_ADDR_LEN; i++)
 1679                 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
 1680 
 1681         /*
 1682          * Call MI attach routine.  Can't hold locks when calling into ether_*.
 1683          */
 1684         MSK_IF_UNLOCK(sc_if);
 1685         ether_ifattach(ifp, eaddr);
 1686         MSK_IF_LOCK(sc_if);
 1687 
 1688         /* VLAN capability setup */
 1689         ifp->if_capabilities |= IFCAP_VLAN_MTU;
 1690         if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
 1691                 /*
 1692                  * Due to Tx checksum offload hardware bugs, msk(4) manually
  1693                  * computes the checksum for short frames.  For VLAN tagged
  1694                  * frames this workaround does not work, so disable checksum
  1695                  * offloading on VLAN interfaces.
 1696                  */
 1697                 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
 1698                 /*
  1699                  * Enable Rx checksum offloading for VLAN tagged frames
  1700                  * if the controller supports the new descriptor format.
 1701                  */
 1702                 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
 1703                     (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
 1704                         ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
 1705         }
 1706         ifp->if_capenable = ifp->if_capabilities;
 1707 
 1708         /*
 1709          * Tell the upper layer(s) we support long frames.
 1710          * Must appear after the call to ether_ifattach() because
 1711          * ether_ifattach() sets ifi_hdrlen to the default value.
 1712          */
 1713         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 1714 
 1715         /*
 1716          * Do miibus setup.
 1717          */
 1718         MSK_IF_UNLOCK(sc_if);
 1719         error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
 1720             msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
 1721             mmd->mii_flags);
 1722         if (error != 0) {
 1723                 device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
 1724                 ether_ifdetach(ifp);
 1725                 error = ENXIO;
 1726                 goto fail;
 1727         }
 1728 
 1729 fail:
 1730         if (error != 0) {
 1731                 /* Access should be ok even though lock has been dropped */
 1732                 sc->msk_if[port] = NULL;
 1733                 msk_detach(dev);
 1734         }
 1735 
 1736         return (error);
 1737 }
 1738 
 1739 /*
  1740  * Attach the controller. Map the registers, identify the chip,
  1741  * allocate interrupts and add a child msk device for each port.
 1742  */
 1743 static int
 1744 mskc_attach(device_t dev)
 1745 {
 1746         struct msk_softc *sc;
 1747         struct msk_mii_data *mmd;
 1748         int error, msic, msir, reg;
 1749 
 1750         sc = device_get_softc(dev);
 1751         sc->msk_dev = dev;
 1752         mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1753             MTX_DEF);
 1754 
 1755         /*
 1756          * Map control/status registers.
 1757          */
 1758         pci_enable_busmaster(dev);
 1759 
 1760         /* Allocate I/O resource */
 1761 #ifdef MSK_USEIOSPACE
 1762         sc->msk_res_spec = msk_res_spec_io;
 1763 #else
 1764         sc->msk_res_spec = msk_res_spec_mem;
 1765 #endif
 1766         sc->msk_irq_spec = msk_irq_spec_legacy;
 1767         error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
 1768         if (error) {
 1769                 if (sc->msk_res_spec == msk_res_spec_mem)
 1770                         sc->msk_res_spec = msk_res_spec_io;
 1771                 else
 1772                         sc->msk_res_spec = msk_res_spec_mem;
 1773                 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
 1774                 if (error) {
 1775                         device_printf(dev, "couldn't allocate %s resources\n",
 1776                             sc->msk_res_spec == msk_res_spec_mem ? "memory" :
 1777                             "I/O");
 1778                         mtx_destroy(&sc->msk_mtx);
 1779                         return (ENXIO);
 1780                 }
 1781         }
 1782 
 1783         /* Enable all clocks before accessing any registers. */
 1784         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
 1785 
 1786         CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1787         sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
 1788         sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
 1789         /* Bail out if chip is not recognized. */
 1790         if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
 1791             sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
 1792             sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
 1793                 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
 1794                     sc->msk_hw_id, sc->msk_hw_rev);
 1795                 mtx_destroy(&sc->msk_mtx);
 1796                 return (ENXIO);
 1797         }
 1798 
 1799         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 1800             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1801             OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 1802             &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
 1803             "max number of Rx events to process");
 1804 
 1805         sc->msk_process_limit = MSK_PROC_DEFAULT;
 1806         error = resource_int_value(device_get_name(dev), device_get_unit(dev),
 1807             "process_limit", &sc->msk_process_limit);
 1808         if (error == 0) {
 1809                 if (sc->msk_process_limit < MSK_PROC_MIN ||
 1810                     sc->msk_process_limit > MSK_PROC_MAX) {
 1811                         device_printf(dev, "process_limit value out of range; "
 1812                             "using default: %d\n", MSK_PROC_DEFAULT);
 1813                         sc->msk_process_limit = MSK_PROC_DEFAULT;
 1814                 }
 1815         }
 1816 
 1817         sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
 1818         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
 1819             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 1820             "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
  1821             "Maximum amount of time to delay interrupts");
 1822         resource_int_value(device_get_name(dev), device_get_unit(dev),
 1823             "int_holdoff", &sc->msk_int_holdoff);
 1824 
 1825         sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
 1826         /* Check number of MACs. */
 1827         sc->msk_num_port = 1;
 1828         if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
 1829             CFG_DUAL_MAC_MSK) {
 1830                 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
 1831                         sc->msk_num_port++;
 1832         }
 1833 
 1834         /* Check bus type. */
 1835         if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
 1836                 sc->msk_bustype = MSK_PEX_BUS;
 1837                 sc->msk_expcap = reg;
 1838         } else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
 1839                 sc->msk_bustype = MSK_PCIX_BUS;
 1840                 sc->msk_pcixcap = reg;
 1841         } else
 1842                 sc->msk_bustype = MSK_PCI_BUS;
 1843 
 1844         switch (sc->msk_hw_id) {
 1845         case CHIP_ID_YUKON_EC:
 1846                 sc->msk_clock = 125;    /* 125 MHz */
 1847                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1848                 break;
 1849         case CHIP_ID_YUKON_EC_U:
 1850                 sc->msk_clock = 125;    /* 125 MHz */
 1851                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
 1852                 break;
 1853         case CHIP_ID_YUKON_EX:
 1854                 sc->msk_clock = 125;    /* 125 MHz */
 1855                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
 1856                     MSK_FLAG_AUTOTX_CSUM;
 1857                 /*
  1858                  * Yukon Extreme seems to have a silicon bug in its
  1859                  * automatic Tx checksum calculation capability.
 1860                  */
 1861                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
 1862                         sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
 1863                 /*
  1864                  * Yukon Extreme A0 cannot use store-and-forward
 1865                  * for jumbo frames, so disable Tx checksum
 1866                  * offloading for jumbo frames.
 1867                  */
 1868                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
 1869                         sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
 1870                 break;
 1871         case CHIP_ID_YUKON_FE:
 1872                 sc->msk_clock = 100;    /* 100 MHz */
 1873                 sc->msk_pflags |= MSK_FLAG_FASTETHER;
 1874                 break;
 1875         case CHIP_ID_YUKON_FE_P:
 1876                 sc->msk_clock = 50;     /* 50 MHz */
 1877                 sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
 1878                     MSK_FLAG_AUTOTX_CSUM;
 1879                 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
 1880                         /*
 1881                          * XXX
  1882                          * FE+ A0 has a status LE writeback bug, so msk(4)
  1883                          * does not rely on the status word of received frames
  1884                          * in msk_rxeof().  This in turn disables all hardware
  1885                          * assistance bits reported by the status word, as well
  1886                          * as the validity of the received frame.  Just pass
  1887                          * received frames to the upper stack with minimal
  1888                          * tests and let the upper stack handle them.
 1889                          */
 1890                         sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
 1891                             MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
 1892                 }
 1893                 break;
 1894         case CHIP_ID_YUKON_XL:
 1895                 sc->msk_clock = 156;    /* 156 MHz */
 1896                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1897                 break;
 1898         case CHIP_ID_YUKON_SUPR:
 1899                 sc->msk_clock = 125;    /* 125 MHz */
 1900                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
 1901                     MSK_FLAG_AUTOTX_CSUM;
 1902                 break;
 1903         case CHIP_ID_YUKON_UL_2:
 1904                 sc->msk_clock = 125;    /* 125 MHz */
 1905                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1906                 break;
 1907         case CHIP_ID_YUKON_OPT:
 1908                 sc->msk_clock = 125;    /* 125 MHz */
 1909                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
 1910                 break;
 1911         default:
 1912                 sc->msk_clock = 156;    /* 156 MHz */
 1913                 break;
 1914         }
 1915 
 1916         /* Allocate IRQ resources. */
 1917         msic = pci_msi_count(dev);
 1918         if (bootverbose)
 1919                 device_printf(dev, "MSI count : %d\n", msic);
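               /*
                * msi_disable and legacy_intr are driver knobs declared
                * earlier in this file; msk(4) exposes them as the
                * hw.msk.msi_disable and hw.msk.legacy_intr loader tunables.
                */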
 1920         if (legacy_intr != 0)
 1921                 msi_disable = 1;
 1922         if (msi_disable == 0 && msic > 0) {
 1923                 msir = 1;
 1924                 if (pci_alloc_msi(dev, &msir) == 0) {
 1925                         if (msir == 1) {
 1926                                 sc->msk_pflags |= MSK_FLAG_MSI;
 1927                                 sc->msk_irq_spec = msk_irq_spec_msi;
 1928                         } else
 1929                                 pci_release_msi(dev);
 1930                 }
 1931         }
 1932 
 1933         error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
 1934         if (error) {
 1935                 device_printf(dev, "couldn't allocate IRQ resources\n");
 1936                 goto fail;
 1937         }
 1938 
 1939         if ((error = msk_status_dma_alloc(sc)) != 0)
 1940                 goto fail;
 1941 
 1942         /* Set base interrupt mask. */
 1943         sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
 1944         sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
 1945             Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
 1946 
 1947         /* Reset the adapter. */
 1948         mskc_reset(sc);
 1949 
 1950         if ((error = mskc_setup_rambuffer(sc)) != 0)
 1951                 goto fail;
 1952 
 1953         sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
 1954         if (sc->msk_devs[MSK_PORT_A] == NULL) {
 1955                 device_printf(dev, "failed to add child for PORT_A\n");
 1956                 error = ENXIO;
 1957                 goto fail;
 1958         }
 1959         mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
 1960         if (mmd == NULL) {
 1961                 device_printf(dev, "failed to allocate memory for "
 1962                     "ivars of PORT_A\n");
 1963                 error = ENXIO;
 1964                 goto fail;
 1965         }
 1966         mmd->port = MSK_PORT_A;
 1967         mmd->pmd = sc->msk_pmd;
 1968         mmd->mii_flags |= MIIF_DOPAUSE | MIIF_FORCEPAUSE;
 1969         if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
 1970                 mmd->mii_flags |= MIIF_HAVEFIBER;
 1971         if (sc->msk_pmd == 'P')
 1972                 mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
 1973         device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);
 1974 
 1975         if (sc->msk_num_port > 1) {
 1976                 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
 1977                 if (sc->msk_devs[MSK_PORT_B] == NULL) {
 1978                         device_printf(dev, "failed to add child for PORT_B\n");
 1979                         error = ENXIO;
 1980                         goto fail;
 1981                 }
 1982                 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
 1983                     M_ZERO);
 1984                 if (mmd == NULL) {
 1985                         device_printf(dev, "failed to allocate memory for "
 1986                             "ivars of PORT_B\n");
 1987                         error = ENXIO;
 1988                         goto fail;
 1989                 }
 1990                 mmd->port = MSK_PORT_B;
 1991                 mmd->pmd = sc->msk_pmd;
 1992                 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
 1993                         mmd->mii_flags |= MIIF_HAVEFIBER;
 1994                 if (sc->msk_pmd == 'P')
 1995                         mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
 1996                 device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
 1997         }
 1998 
 1999         error = bus_generic_attach(dev);
 2000         if (error) {
 2001                 device_printf(dev, "failed to attach port(s)\n");
 2002                 goto fail;
 2003         }
 2004 
 2005         /* Hook interrupt last to avoid having to lock softc. */
 2006         error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
 2007             INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
 2008         if (error != 0) {
 2009                 device_printf(dev, "couldn't set up interrupt handler\n");
 2010                 goto fail;
 2011         }
 2012 fail:
 2013         if (error != 0)
 2014                 mskc_detach(dev);
 2015 
 2016         return (error);
 2017 }
 2018 
 2019 /*
  2020  * Shut down the hardware and free up resources. This can be called any
 2021  * time after the mutex has been initialized. It is called in both
 2022  * the error case in attach and the normal detach case so it needs
 2023  * to be careful about only freeing resources that have actually been
 2024  * allocated.
 2025  */
 2026 static int
 2027 msk_detach(device_t dev)
 2028 {
 2029         struct msk_softc *sc;
 2030         struct msk_if_softc *sc_if;
 2031         struct ifnet *ifp;
 2032 
 2033         sc_if = device_get_softc(dev);
 2034         KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
 2035             ("msk mutex not initialized in msk_detach"));
 2036         MSK_IF_LOCK(sc_if);
 2037 
 2038         ifp = sc_if->msk_ifp;
 2039         if (device_is_attached(dev)) {
 2040                 /* XXX */
 2041                 sc_if->msk_flags |= MSK_FLAG_DETACH;
 2042                 msk_stop(sc_if);
 2043                 /* Can't hold locks while calling detach. */
 2044                 MSK_IF_UNLOCK(sc_if);
 2045                 callout_drain(&sc_if->msk_tick_ch);
 2046                 ether_ifdetach(ifp);
 2047                 MSK_IF_LOCK(sc_if);
 2048         }
 2049 
 2050         /*
 2051          * We're generally called from mskc_detach() which is using
 2052          * device_delete_child() to get to here. It's already trashed
 2053          * miibus for us, so don't do it here or we'll panic.
 2054          *
 2055          * if (sc_if->msk_miibus != NULL) {
 2056          *      device_delete_child(dev, sc_if->msk_miibus);
 2057          *      sc_if->msk_miibus = NULL;
 2058          * }
 2059          */
 2060 
 2061         msk_rx_dma_jfree(sc_if);
 2062         msk_txrx_dma_free(sc_if);
 2063         bus_generic_detach(dev);
 2064 
 2065         if (ifp)
 2066                 if_free(ifp);
 2067         sc = sc_if->msk_softc;
 2068         sc->msk_if[sc_if->msk_port] = NULL;
 2069         MSK_IF_UNLOCK(sc_if);
 2070 
 2071         return (0);
 2072 }
 2073 
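       /*
        * Detach the controller: delete the child msk devices, mask all
        * interrupts, put the chip into reset and release bus resources.
        */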
 2074 static int
 2075 mskc_detach(device_t dev)
 2076 {
 2077         struct msk_softc *sc;
 2078 
 2079         sc = device_get_softc(dev);
 2080         KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
 2081 
 2082         if (device_is_alive(dev)) {
 2083                 if (sc->msk_devs[MSK_PORT_A] != NULL) {
 2084                         free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
 2085                             M_DEVBUF);
 2086                         device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
 2087                 }
 2088                 if (sc->msk_devs[MSK_PORT_B] != NULL) {
 2089                         free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
 2090                             M_DEVBUF);
 2091                         device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
 2092                 }
 2093                 bus_generic_detach(dev);
 2094         }
 2095 
 2096         /* Disable all interrupts. */
 2097         CSR_WRITE_4(sc, B0_IMSK, 0);
 2098         CSR_READ_4(sc, B0_IMSK);
 2099         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 2100         CSR_READ_4(sc, B0_HWE_IMSK);
 2101 
 2102         /* LED Off. */
 2103         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
 2104 
  2105         /* Put the hardware into reset. */
 2106         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 2107 
 2108         msk_status_dma_free(sc);
 2109 
 2110         if (sc->msk_intrhand) {
 2111                 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
 2112                 sc->msk_intrhand = NULL;
 2113         }
 2114         bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
 2115         if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
 2116                 pci_release_msi(dev);
 2117         bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
 2118         mtx_destroy(&sc->msk_mtx);
 2119 
 2120         return (0);
 2121 }
 2122 
 2123 struct msk_dmamap_arg {
 2124         bus_addr_t      msk_busaddr;
 2125 };
 2126 
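       /*
        * busdma load callback: record the bus address of a single-segment
        * mapping in the caller-provided msk_dmamap_arg.
        */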
 2127 static void
 2128 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 2129 {
 2130         struct msk_dmamap_arg *ctx;
 2131 
 2132         if (error != 0)
 2133                 return;
 2134         ctx = arg;
 2135         ctx->msk_busaddr = segs[0].ds_addr;
 2136 }
 2137 
 2138 /* Create status DMA region. */
 2139 static int
 2140 msk_status_dma_alloc(struct msk_softc *sc)
 2141 {
 2142         struct msk_dmamap_arg ctx;
 2143         bus_size_t stat_sz;
 2144         int count, error;
 2145 
 2146         /*
  2147          * The controller seems to require the number of status LE
  2148          * entries to be a power of 2, with a maximum of 4096 entries.
  2149          * For dual-port controllers, the number of status LE entries
  2150          * should be large enough to hold status updates from both
  2151          * ports.
 2152          */
 2153         count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT;
 2154         count = imin(4096, roundup2(count, 1024));
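               /*
                * For instance, 512-entry Rx and Tx rings would yield
                * 3 * 512 + 512 = 2048 status LE entries.
                */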
 2155         sc->msk_stat_count = count;
 2156         stat_sz = count * sizeof(struct msk_stat_desc);
 2157         error = bus_dma_tag_create(
 2158                     bus_get_dma_tag(sc->msk_dev),       /* parent */
 2159                     MSK_STAT_ALIGN, 0,          /* alignment, boundary */
 2160                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2161                     BUS_SPACE_MAXADDR,          /* highaddr */
 2162                     NULL, NULL,                 /* filter, filterarg */
 2163                     stat_sz,                    /* maxsize */
 2164                     1,                          /* nsegments */
 2165                     stat_sz,                    /* maxsegsize */
 2166                     0,                          /* flags */
 2167                     NULL, NULL,                 /* lockfunc, lockarg */
 2168                     &sc->msk_stat_tag);
 2169         if (error != 0) {
 2170                 device_printf(sc->msk_dev,
 2171                     "failed to create status DMA tag\n");
 2172                 return (error);
 2173         }
 2174 
 2175         /* Allocate DMA'able memory and load the DMA map for status ring. */
 2176         error = bus_dmamem_alloc(sc->msk_stat_tag,
 2177             (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
 2178             BUS_DMA_ZERO, &sc->msk_stat_map);
 2179         if (error != 0) {
 2180                 device_printf(sc->msk_dev,
 2181                     "failed to allocate DMA'able memory for status ring\n");
 2182                 return (error);
 2183         }
 2184 
 2185         ctx.msk_busaddr = 0;
 2186         error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
 2187             sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2188         if (error != 0) {
 2189                 device_printf(sc->msk_dev,
 2190                     "failed to load DMA'able memory for status ring\n");
 2191                 return (error);
 2192         }
 2193         sc->msk_stat_ring_paddr = ctx.msk_busaddr;
 2194 
 2195         return (0);
 2196 }
 2197 
 2198 static void
 2199 msk_status_dma_free(struct msk_softc *sc)
 2200 {
 2201 
 2202         /* Destroy status block. */
 2203         if (sc->msk_stat_tag) {
 2204                 if (sc->msk_stat_map) {
 2205                         bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
 2206                         if (sc->msk_stat_ring) {
 2207                                 bus_dmamem_free(sc->msk_stat_tag,
 2208                                     sc->msk_stat_ring, sc->msk_stat_map);
 2209                                 sc->msk_stat_ring = NULL;
 2210                         }
 2211                         sc->msk_stat_map = NULL;
 2212                 }
 2213                 bus_dma_tag_destroy(sc->msk_stat_tag);
 2214                 sc->msk_stat_tag = NULL;
 2215         }
 2216 }
 2217 
 2218 static int
 2219 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
 2220 {
 2221         struct msk_dmamap_arg ctx;
 2222         struct msk_txdesc *txd;
 2223         struct msk_rxdesc *rxd;
 2224         bus_size_t rxalign;
 2225         int error, i;
 2226 
 2227         /* Create parent DMA tag. */
 2228         error = bus_dma_tag_create(
 2229                     bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
 2230                     1, 0,                       /* alignment, boundary */
 2231                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2232                     BUS_SPACE_MAXADDR,          /* highaddr */
 2233                     NULL, NULL,                 /* filter, filterarg */
 2234                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 2235                     0,                          /* nsegments */
 2236                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 2237                     0,                          /* flags */
 2238                     NULL, NULL,                 /* lockfunc, lockarg */
 2239                     &sc_if->msk_cdata.msk_parent_tag);
 2240         if (error != 0) {
 2241                 device_printf(sc_if->msk_if_dev,
 2242                     "failed to create parent DMA tag\n");
 2243                 goto fail;
 2244         }
 2245         /* Create tag for Tx ring. */
 2246         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2247                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2248                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2249                     BUS_SPACE_MAXADDR,          /* highaddr */
 2250                     NULL, NULL,                 /* filter, filterarg */
 2251                     MSK_TX_RING_SZ,             /* maxsize */
 2252                     1,                          /* nsegments */
 2253                     MSK_TX_RING_SZ,             /* maxsegsize */
 2254                     0,                          /* flags */
 2255                     NULL, NULL,                 /* lockfunc, lockarg */
 2256                     &sc_if->msk_cdata.msk_tx_ring_tag);
 2257         if (error != 0) {
 2258                 device_printf(sc_if->msk_if_dev,
 2259                     "failed to create Tx ring DMA tag\n");
 2260                 goto fail;
 2261         }
 2262 
 2263         /* Create tag for Rx ring. */
 2264         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2265                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2266                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2267                     BUS_SPACE_MAXADDR,          /* highaddr */
 2268                     NULL, NULL,                 /* filter, filterarg */
 2269                     MSK_RX_RING_SZ,             /* maxsize */
 2270                     1,                          /* nsegments */
 2271                     MSK_RX_RING_SZ,             /* maxsegsize */
 2272                     0,                          /* flags */
 2273                     NULL, NULL,                 /* lockfunc, lockarg */
 2274                     &sc_if->msk_cdata.msk_rx_ring_tag);
 2275         if (error != 0) {
 2276                 device_printf(sc_if->msk_if_dev,
 2277                     "failed to create Rx ring DMA tag\n");
 2278                 goto fail;
 2279         }
 2280 
 2281         /* Create tag for Tx buffers. */
 2282         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2283                     1, 0,                       /* alignment, boundary */
 2284                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2285                     BUS_SPACE_MAXADDR,          /* highaddr */
 2286                     NULL, NULL,                 /* filter, filterarg */
 2287                     MSK_TSO_MAXSIZE,            /* maxsize */
 2288                     MSK_MAXTXSEGS,              /* nsegments */
 2289                     MSK_TSO_MAXSGSIZE,          /* maxsegsize */
 2290                     0,                          /* flags */
 2291                     NULL, NULL,                 /* lockfunc, lockarg */
 2292                     &sc_if->msk_cdata.msk_tx_tag);
 2293         if (error != 0) {
 2294                 device_printf(sc_if->msk_if_dev,
 2295                     "failed to create Tx DMA tag\n");
 2296                 goto fail;
 2297         }
 2298 
 2299         rxalign = 1;
 2300         /*
  2301          * Work around a hardware hang that seems to happen when the Rx
  2302          * buffer is not aligned on a multiple of the FIFO word (8 bytes).
 2303          */
 2304         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 2305                 rxalign = MSK_RX_BUF_ALIGN;
 2306         /* Create tag for Rx buffers. */
 2307         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2308                     rxalign, 0,                 /* alignment, boundary */
 2309                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2310                     BUS_SPACE_MAXADDR,          /* highaddr */
 2311                     NULL, NULL,                 /* filter, filterarg */
 2312                     MCLBYTES,                   /* maxsize */
 2313                     1,                          /* nsegments */
 2314                     MCLBYTES,                   /* maxsegsize */
 2315                     0,                          /* flags */
 2316                     NULL, NULL,                 /* lockfunc, lockarg */
 2317                     &sc_if->msk_cdata.msk_rx_tag);
 2318         if (error != 0) {
 2319                 device_printf(sc_if->msk_if_dev,
 2320                     "failed to create Rx DMA tag\n");
 2321                 goto fail;
 2322         }
 2323 
 2324         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
 2325         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
 2326             (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
 2327             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
 2328         if (error != 0) {
 2329                 device_printf(sc_if->msk_if_dev,
 2330                     "failed to allocate DMA'able memory for Tx ring\n");
 2331                 goto fail;
 2332         }
 2333 
 2334         ctx.msk_busaddr = 0;
 2335         error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
 2336             sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
 2337             MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2338         if (error != 0) {
 2339                 device_printf(sc_if->msk_if_dev,
 2340                     "failed to load DMA'able memory for Tx ring\n");
 2341                 goto fail;
 2342         }
 2343         sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
 2344 
 2345         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
 2346         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
 2347             (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
 2348             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
 2349         if (error != 0) {
 2350                 device_printf(sc_if->msk_if_dev,
 2351                     "failed to allocate DMA'able memory for Rx ring\n");
 2352                 goto fail;
 2353         }
 2354 
 2355         ctx.msk_busaddr = 0;
 2356         error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
 2357             sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
 2358             MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2359         if (error != 0) {
 2360                 device_printf(sc_if->msk_if_dev,
 2361                     "failed to load DMA'able memory for Rx ring\n");
 2362                 goto fail;
 2363         }
 2364         sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
 2365 
 2366         /* Create DMA maps for Tx buffers. */
 2367         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2368                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 2369                 txd->tx_m = NULL;
 2370                 txd->tx_dmamap = NULL;
 2371                 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
 2372                     &txd->tx_dmamap);
 2373                 if (error != 0) {
 2374                         device_printf(sc_if->msk_if_dev,
 2375                             "failed to create Tx dmamap\n");
 2376                         goto fail;
 2377                 }
 2378         }
 2379         /* Create DMA maps for Rx buffers. */
 2380         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
 2381             &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
 2382                 device_printf(sc_if->msk_if_dev,
 2383                     "failed to create spare Rx dmamap\n");
 2384                 goto fail;
 2385         }
 2386         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2387                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2388                 rxd->rx_m = NULL;
 2389                 rxd->rx_dmamap = NULL;
 2390                 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
 2391                     &rxd->rx_dmamap);
 2392                 if (error != 0) {
 2393                         device_printf(sc_if->msk_if_dev,
 2394                             "failed to create Rx dmamap\n");
 2395                         goto fail;
 2396                 }
 2397         }
 2398 
 2399 fail:
 2400         return (error);
 2401 }
 2402 
 2403 static int
 2404 msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
 2405 {
 2406         struct msk_dmamap_arg ctx;
 2407         struct msk_rxdesc *jrxd;
 2408         bus_size_t rxalign;
 2409         int error, i;
 2410 
 2411         if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
 2412                 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
 2413                 device_printf(sc_if->msk_if_dev,
 2414                     "disabling jumbo frame support\n");
 2415                 return (0);
 2416         }
 2417         /* Create tag for jumbo Rx ring. */
 2418         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2419                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2420                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2421                     BUS_SPACE_MAXADDR,          /* highaddr */
 2422                     NULL, NULL,                 /* filter, filterarg */
 2423                     MSK_JUMBO_RX_RING_SZ,       /* maxsize */
 2424                     1,                          /* nsegments */
 2425                     MSK_JUMBO_RX_RING_SZ,       /* maxsegsize */
 2426                     0,                          /* flags */
 2427                     NULL, NULL,                 /* lockfunc, lockarg */
 2428                     &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2429         if (error != 0) {
 2430                 device_printf(sc_if->msk_if_dev,
 2431                     "failed to create jumbo Rx ring DMA tag\n");
 2432                 goto jumbo_fail;
 2433         }
 2434 
 2435         rxalign = 1;
 2436         /*
  2437          * Work around a hardware hang that seems to happen when the Rx
  2438          * buffer is not aligned on a multiple of the FIFO word (8 bytes).
 2439          */
 2440         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 2441                 rxalign = MSK_RX_BUF_ALIGN;
 2442         /* Create tag for jumbo Rx buffers. */
 2443         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2444                     rxalign, 0,                 /* alignment, boundary */
 2445                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2446                     BUS_SPACE_MAXADDR,          /* highaddr */
 2447                     NULL, NULL,                 /* filter, filterarg */
 2448                     MJUM9BYTES,                 /* maxsize */
 2449                     1,                          /* nsegments */
 2450                     MJUM9BYTES,                 /* maxsegsize */
 2451                     0,                          /* flags */
 2452                     NULL, NULL,                 /* lockfunc, lockarg */
 2453                     &sc_if->msk_cdata.msk_jumbo_rx_tag);
 2454         if (error != 0) {
 2455                 device_printf(sc_if->msk_if_dev,
 2456                     "failed to create jumbo Rx DMA tag\n");
 2457                 goto jumbo_fail;
 2458         }
 2459 
 2460         /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
 2461         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2462             (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
 2463             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
 2464             &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2465         if (error != 0) {
 2466                 device_printf(sc_if->msk_if_dev,
 2467                     "failed to allocate DMA'able memory for jumbo Rx ring\n");
 2468                 goto jumbo_fail;
 2469         }
 2470 
 2471         ctx.msk_busaddr = 0;
 2472         error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2473             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 2474             sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
 2475             msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2476         if (error != 0) {
 2477                 device_printf(sc_if->msk_if_dev,
 2478                     "failed to load DMA'able memory for jumbo Rx ring\n");
 2479                 goto jumbo_fail;
 2480         }
 2481         sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
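              /*
               * msk_dmamap_cb is the usual single-segment bus_dma(9) load
               * callback used throughout this driver; it is expected to copy
               * the lone segment's bus address into ctx.msk_busaddr, which
               * becomes the physical address of the jumbo Rx ring.
               */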
 2482 
 2483         /* Create DMA maps for jumbo Rx buffers. */
 2484         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2485             &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
 2486                 device_printf(sc_if->msk_if_dev,
 2487                     "failed to create spare jumbo Rx dmamap\n");
 2488                 goto jumbo_fail;
 2489         }
 2490         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2491                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2492                 jrxd->rx_m = NULL;
 2493                 jrxd->rx_dmamap = NULL;
 2494                 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2495                     &jrxd->rx_dmamap);
 2496                 if (error != 0) {
 2497                         device_printf(sc_if->msk_if_dev,
 2498                             "failed to create jumbo Rx dmamap\n");
 2499                         goto jumbo_fail;
 2500                 }
 2501         }
 2502 
 2503         return (0);
 2504 
 2505 jumbo_fail:
 2506         msk_rx_dma_jfree(sc_if);
 2507         device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
 2508             "due to resource shortage\n");
 2509         sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
 2510         return (error);
 2511 }
 2512 
 2513 static void
 2514 msk_txrx_dma_free(struct msk_if_softc *sc_if)
 2515 {
 2516         struct msk_txdesc *txd;
 2517         struct msk_rxdesc *rxd;
 2518         int i;
 2519 
 2520         /* Tx ring. */
 2521         if (sc_if->msk_cdata.msk_tx_ring_tag) {
 2522                 if (sc_if->msk_cdata.msk_tx_ring_map)
 2523                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
 2524                             sc_if->msk_cdata.msk_tx_ring_map);
 2525                 if (sc_if->msk_cdata.msk_tx_ring_map &&
 2526                     sc_if->msk_rdata.msk_tx_ring)
 2527                         bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
 2528                             sc_if->msk_rdata.msk_tx_ring,
 2529                             sc_if->msk_cdata.msk_tx_ring_map);
 2530                 sc_if->msk_rdata.msk_tx_ring = NULL;
 2531                 sc_if->msk_cdata.msk_tx_ring_map = NULL;
 2532                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
 2533                 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
 2534         }
 2535         /* Rx ring. */
 2536         if (sc_if->msk_cdata.msk_rx_ring_tag) {
 2537                 if (sc_if->msk_cdata.msk_rx_ring_map)
 2538                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
 2539                             sc_if->msk_cdata.msk_rx_ring_map);
 2540                 if (sc_if->msk_cdata.msk_rx_ring_map &&
 2541                     sc_if->msk_rdata.msk_rx_ring)
 2542                         bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
 2543                             sc_if->msk_rdata.msk_rx_ring,
 2544                             sc_if->msk_cdata.msk_rx_ring_map);
 2545                 sc_if->msk_rdata.msk_rx_ring = NULL;
 2546                 sc_if->msk_cdata.msk_rx_ring_map = NULL;
 2547                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
 2548                 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
 2549         }
 2550         /* Tx buffers. */
 2551         if (sc_if->msk_cdata.msk_tx_tag) {
 2552                 for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2553                         txd = &sc_if->msk_cdata.msk_txdesc[i];
 2554                         if (txd->tx_dmamap) {
 2555                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
 2556                                     txd->tx_dmamap);
 2557                                 txd->tx_dmamap = NULL;
 2558                         }
 2559                 }
 2560                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
 2561                 sc_if->msk_cdata.msk_tx_tag = NULL;
 2562         }
 2563         /* Rx buffers. */
 2564         if (sc_if->msk_cdata.msk_rx_tag) {
 2565                 for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2566                         rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2567                         if (rxd->rx_dmamap) {
 2568                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2569                                     rxd->rx_dmamap);
 2570                                 rxd->rx_dmamap = NULL;
 2571                         }
 2572                 }
 2573                 if (sc_if->msk_cdata.msk_rx_sparemap) {
 2574                         bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2575                             sc_if->msk_cdata.msk_rx_sparemap);
 2576                         sc_if->msk_cdata.msk_rx_sparemap = 0;
 2577                 }
 2578                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
 2579                 sc_if->msk_cdata.msk_rx_tag = NULL;
 2580         }
 2581         if (sc_if->msk_cdata.msk_parent_tag) {
 2582                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
 2583                 sc_if->msk_cdata.msk_parent_tag = NULL;
 2584         }
 2585 }
 2586 
 2587 static void
 2588 msk_rx_dma_jfree(struct msk_if_softc *sc_if)
 2589 {
 2590         struct msk_rxdesc *jrxd;
 2591         int i;
 2592 
 2593         /* Jumbo Rx ring. */
 2594         if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
 2595                 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
 2596                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2597                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2598                 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
 2599                     sc_if->msk_rdata.msk_jumbo_rx_ring)
 2600                         bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2601                             sc_if->msk_rdata.msk_jumbo_rx_ring,
 2602                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2603                 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
 2604                 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
 2605                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2606                 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
 2607         }
 2608         /* Jumbo Rx buffers. */
 2609         if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
 2610                 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2611                         jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2612                         if (jrxd->rx_dmamap) {
 2613                                 bus_dmamap_destroy(
 2614                                     sc_if->msk_cdata.msk_jumbo_rx_tag,
 2615                                     jrxd->rx_dmamap);
 2616                                 jrxd->rx_dmamap = NULL;
 2617                         }
 2618                 }
 2619                 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
 2620                         bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
 2621                             sc_if->msk_cdata.msk_jumbo_rx_sparemap);
 2622                         sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
 2623                 }
 2624                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
 2625                 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
 2626         }
 2627 }
 2628 
 2629 static int
 2630 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
 2631 {
 2632         struct msk_txdesc *txd, *txd_last;
 2633         struct msk_tx_desc *tx_le;
 2634         struct mbuf *m;
 2635         bus_dmamap_t map;
 2636         bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
 2637         uint32_t control, csum, prod, si;
 2638         uint16_t offset, tcp_offset, tso_mtu;
 2639         int error, i, nseg, tso;
 2640 
 2641         MSK_IF_LOCK_ASSERT(sc_if);
 2642 
 2643         tcp_offset = offset = 0;
 2644         m = *m_head;
 2645         if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
 2646             (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
 2647             ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 2648             (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
 2649                 /*
 2650                  * Since the mbuf carries no protocol-specific structure
 2651                  * information, we have to parse the headers here to set
 2652                  * up TSO and checksum offload. It is unclear why Marvell
 2653                  * made such a design decision, since other GigE
 2654                  * controllers normally take care of these chores in
 2655                  * hardware. However, TSO performance of the Yukon II is
 2656                  * good enough that it is worth implementing.
 2657                  */
 2658                 struct ether_header *eh;
 2659                 struct ip *ip;
 2660                 struct tcphdr *tcp;
 2661 
 2662                 if (M_WRITABLE(m) == 0) {
 2663                         /* Get a writable copy. */
 2664                         m = m_dup(*m_head, M_DONTWAIT);
 2665                         m_freem(*m_head);
 2666                         if (m == NULL) {
 2667                                 *m_head = NULL;
 2668                                 return (ENOBUFS);
 2669                         }
 2670                         *m_head = m;
 2671                 }
 2672 
 2673                 offset = sizeof(struct ether_header);
 2674                 m = m_pullup(m, offset);
 2675                 if (m == NULL) {
 2676                         *m_head = NULL;
 2677                         return (ENOBUFS);
 2678                 }
 2679                 eh = mtod(m, struct ether_header *);
 2680                 /* Check if hardware VLAN insertion is off. */
 2681                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 2682                         offset = sizeof(struct ether_vlan_header);
 2683                         m = m_pullup(m, offset);
 2684                         if (m == NULL) {
 2685                                 *m_head = NULL;
 2686                                 return (ENOBUFS);
 2687                         }
 2688                 }
 2689                 m = m_pullup(m, offset + sizeof(struct ip));
 2690                 if (m == NULL) {
 2691                         *m_head = NULL;
 2692                         return (ENOBUFS);
 2693                 }
 2694                 ip = (struct ip *)(mtod(m, char *) + offset);
 2695                 offset += (ip->ip_hl << 2);
 2696                 tcp_offset = offset;
 2697                 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2698                         m = m_pullup(m, offset + sizeof(struct tcphdr));
 2699                         if (m == NULL) {
 2700                                 *m_head = NULL;
 2701                                 return (ENOBUFS);
 2702                         }
 2703                         tcp = (struct tcphdr *)(mtod(m, char *) + offset);
 2704                         offset += (tcp->th_off << 2);
 2705                 } else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
 2706                     (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
 2707                     (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
 2708                         /*
 2709                          * The Yukon II seems to have a Tx checksum offload
 2710                          * bug for small TCP packets of less than 60 bytes
 2711                          * (e.g. TCP window probes, pure ACKs). The common
 2712                          * workaround of padding the frame with zeros up to
 2713                          * the minimum Ethernet frame size did not work at
 2714                          * all.
 2715                          * Instead of disabling checksum offload completely,
 2716                          * we resort to a software checksum routine when we
 2717                          * encounter short TCP frames.
 2718                          * Short UDP packets appear to be handled correctly
 2719                          * by the Yukon II. This bug also does not appear to
 2720                          * affect controllers that use the newer descriptor
 2721                          * format or automatic Tx checksum calculation.
 2722                          */
 2723                         m = m_pullup(m, offset + sizeof(struct tcphdr));
 2724                         if (m == NULL) {
 2725                                 *m_head = NULL;
 2726                                 return (ENOBUFS);
 2727                         }
 2728                         *(uint16_t *)(m->m_data + offset +
 2729                             m->m_pkthdr.csum_data) = in_cksum_skip(m,
 2730                             m->m_pkthdr.len, offset);
 2731                         m->m_pkthdr.csum_flags &= ~CSUM_TCP;
 2732                 }
 2733                 *m_head = m;
 2734         }
 2735 
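              /*
               * Map the frame for DMA. If the chain needs more than
               * MSK_MAXTXSEGS segments, collapse it into fewer clusters and
               * retry the load once; a second failure drops the packet.
               */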
 2736         prod = sc_if->msk_cdata.msk_tx_prod;
 2737         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2738         txd_last = txd;
 2739         map = txd->tx_dmamap;
 2740         error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
 2741             *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
 2742         if (error == EFBIG) {
 2743                 m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
 2744                 if (m == NULL) {
 2745                         m_freem(*m_head);
 2746                         *m_head = NULL;
 2747                         return (ENOBUFS);
 2748                 }
 2749                 *m_head = m;
 2750                 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
 2751                     map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
 2752                 if (error != 0) {
 2753                         m_freem(*m_head);
 2754                         *m_head = NULL;
 2755                         return (error);
 2756                 }
 2757         } else if (error != 0)
 2758                 return (error);
 2759         if (nseg == 0) {
 2760                 m_freem(*m_head);
 2761                 *m_head = NULL;
 2762                 return (EIO);
 2763         }
 2764 
 2765         /* Check number of available descriptors. */
 2766         if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
 2767             (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
 2768                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
 2769                 return (ENOBUFS);
 2770         }
 2771 
 2772         control = 0;
 2773         tso = 0;
 2774         tx_le = NULL;
 2775 
 2776         /* Check TSO support. */
 2777         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2778                 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
 2779                         tso_mtu = m->m_pkthdr.tso_segsz;
 2780                 else
 2781                         tso_mtu = offset + m->m_pkthdr.tso_segsz;
 2782                 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
 2783                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2784                         tx_le->msk_addr = htole32(tso_mtu);
 2785                         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
 2786                                 tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
 2787                         else
 2788                                 tx_le->msk_control =
 2789                                     htole32(OP_LRGLEN | HW_OWNER);
 2790                         sc_if->msk_cdata.msk_tx_cnt++;
 2791                         MSK_INC(prod, MSK_TX_RING_CNT);
 2792                         sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
 2793                 }
 2794                 tso++;
 2795         }
 2796         /* Check if we have a VLAN tag to insert. */
 2797         if ((m->m_flags & M_VLANTAG) != 0) {
 2798                 if (tx_le == NULL) {
 2799                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2800                         tx_le->msk_addr = htole32(0);
 2801                         tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
 2802                             htons(m->m_pkthdr.ether_vtag));
 2803                         sc_if->msk_cdata.msk_tx_cnt++;
 2804                         MSK_INC(prod, MSK_TX_RING_CNT);
 2805                 } else {
 2806                         tx_le->msk_control |= htole32(OP_VLAN |
 2807                             htons(m->m_pkthdr.ether_vtag));
 2808                 }
 2809                 control |= INS_VLAN;
 2810         }
 2811         /* Check if we have to handle checksum offload. */
 2812         if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
 2813                 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
 2814                         control |= CALSUM;
 2815                 else {
 2816                         control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
 2817                         if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 2818                                 control |= UDPTCP;
 2819                         /* Checksum write position. */
 2820                         csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
 2821                         /* Checksum start position. */
 2822                         csum |= (uint32_t)tcp_offset << 16;
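                              /*
                               * Illustrative example: for a plain TCP/IPv4
                               * frame with a 20-byte IP header, tcp_offset is
                               * 14 + 20 = 34 and csum_data is the offset of
                               * th_sum (16), so the LE encodes start 34 and
                               * write position 50: csum = (34 << 16) | 50.
                               */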
 2823                         if (csum != sc_if->msk_cdata.msk_last_csum) {
 2824                                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2825                                 tx_le->msk_addr = htole32(csum);
 2826                                 tx_le->msk_control = htole32(1 << 16 |
 2827                                     (OP_TCPLISW | HW_OWNER));
 2828                                 sc_if->msk_cdata.msk_tx_cnt++;
 2829                                 MSK_INC(prod, MSK_TX_RING_CNT);
 2830                                 sc_if->msk_cdata.msk_last_csum = csum;
 2831                         }
 2832                 }
 2833         }
 2834 
 2835 #ifdef MSK_64BIT_DMA
 2836         if (MSK_ADDR_HI(txsegs[0].ds_addr) !=
 2837             sc_if->msk_cdata.msk_tx_high_addr) {
 2838                 sc_if->msk_cdata.msk_tx_high_addr =
 2839                     MSK_ADDR_HI(txsegs[0].ds_addr);
 2840                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2841                 tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr));
 2842                 tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
 2843                 sc_if->msk_cdata.msk_tx_cnt++;
 2844                 MSK_INC(prod, MSK_TX_RING_CNT);
 2845         }
 2846 #endif
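              /*
               * The controller apparently latches the upper 32 address bits
               * from a separate OP_ADDR64 LE, so one is emitted (here and in
               * the segment loop below) only when the high half changes;
               * most packets need no extra descriptor.
               */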
 2847         si = prod;
 2848         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2849         tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
 2850         if (tso == 0)
 2851                 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2852                     OP_PACKET);
 2853         else
 2854                 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2855                     OP_LARGESEND);
 2856         sc_if->msk_cdata.msk_tx_cnt++;
 2857         MSK_INC(prod, MSK_TX_RING_CNT);
 2858 
 2859         for (i = 1; i < nseg; i++) {
 2860                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2861 #ifdef MSK_64BIT_DMA
 2862                 if (MSK_ADDR_HI(txsegs[i].ds_addr) !=
 2863                     sc_if->msk_cdata.msk_tx_high_addr) {
 2864                         sc_if->msk_cdata.msk_tx_high_addr =
 2865                             MSK_ADDR_HI(txsegs[i].ds_addr);
 2866                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2867                         tx_le->msk_addr =
 2868                             htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
 2869                         tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
 2870                         sc_if->msk_cdata.msk_tx_cnt++;
 2871                         MSK_INC(prod, MSK_TX_RING_CNT);
 2872                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2873                 }
 2874 #endif
 2875                 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
 2876                 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
 2877                     OP_BUFFER | HW_OWNER);
 2878                 sc_if->msk_cdata.msk_tx_cnt++;
 2879                 MSK_INC(prod, MSK_TX_RING_CNT);
 2880         }
 2881         /* Update producer index. */
 2882         sc_if->msk_cdata.msk_tx_prod = prod;
 2883 
 2884         /* Set EOP on the last descriptor (the slot just before prod). */
 2885         prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
 2886         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2887         tx_le->msk_control |= htole32(EOP);
 2888 
 2889         /* Turn the first descriptor ownership to hardware. */
 2890         tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
 2891         tx_le->msk_control |= htole32(HW_OWNER);
 2892 
 2893         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2894         map = txd_last->tx_dmamap;
 2895         txd_last->tx_dmamap = txd->tx_dmamap;
 2896         txd->tx_dmamap = map;
 2897         txd->tx_m = m;
 2898 
 2899         /* Sync descriptors. */
 2900         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
 2901         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
 2902             sc_if->msk_cdata.msk_tx_ring_map,
 2903             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2904 
 2905         return (0);
 2906 }
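      /*
       * A single frame may thus expand into several LEs: an optional
       * MSS/LRGLEN LE when the TSO MTU changes, optional VLAN and
       * checksum-parameter LEs, optional OP_ADDR64 LEs, then one
       * OP_PACKET/OP_LARGESEND LE plus an OP_BUFFER LE per extra DMA
       * segment. EOP marks the last LE, and HW_OWNER is set on the first
       * LE only after the whole chain is built, so the chip never sees a
       * partially constructed frame.
       */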
 2907 
 2908 static void
 2909 msk_start(struct ifnet *ifp)
 2910 {
 2911         struct msk_if_softc *sc_if;
 2912 
 2913         sc_if = ifp->if_softc;
 2914         MSK_IF_LOCK(sc_if);
 2915         msk_start_locked(ifp);
 2916         MSK_IF_UNLOCK(sc_if);
 2917 }
 2918 
 2919 static void
 2920 msk_start_locked(struct ifnet *ifp)
 2921 {
 2922         struct msk_if_softc *sc_if;
 2923         struct mbuf *m_head;
 2924         int enq;
 2925 
 2926         sc_if = ifp->if_softc;
 2927         MSK_IF_LOCK_ASSERT(sc_if);
 2928 
 2929         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2930             IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
 2931                 return;
 2932 
 2933         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2934             sc_if->msk_cdata.msk_tx_cnt <
 2935             (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
 2936                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2937                 if (m_head == NULL)
 2938                         break;
 2939                 /*
 2940                  * Pack the data into the transmit ring. If we
 2941                  * don't have room, set the OACTIVE flag and wait
 2942                  * for the NIC to drain the ring.
 2943                  */
 2944                 if (msk_encap(sc_if, &m_head) != 0) {
 2945                         if (m_head == NULL)
 2946                                 break;
 2947                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2948                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2949                         break;
 2950                 }
 2951 
 2952                 enq++;
 2953                 /*
 2954                  * If there's a BPF listener, bounce a copy of this frame
 2955                  * to it.
 2956                  */
 2957                 ETHER_BPF_MTAP(ifp, m_head);
 2958         }
 2959 
 2960         if (enq > 0) {
 2961                 /* Transmit */
 2962                 CSR_WRITE_2(sc_if->msk_softc,
 2963                     Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
 2964                     sc_if->msk_cdata.msk_tx_prod);
 2965 
 2966                 /* Set a timeout in case the chip goes out to lunch. */
 2967                 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
 2968         }
 2969 }
 2970 
 2971 static void
 2972 msk_watchdog(struct msk_if_softc *sc_if)
 2973 {
 2974         struct ifnet *ifp;
 2975 
 2976         MSK_IF_LOCK_ASSERT(sc_if);
 2977 
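              /*
               * The timer is armed by msk_start_locked() and decremented
               * here once per tick; the watchdog fires only when an armed
               * timer has just counted down to zero (0 means idle).
               */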
 2978         if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
 2979                 return;
 2980         ifp = sc_if->msk_ifp;
 2981         if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
 2982                 if (bootverbose)
 2983                         if_printf(sc_if->msk_ifp, "watchdog timeout "
 2984                            "(missed link)\n");
 2985                 ifp->if_oerrors++;
 2986                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2987                 msk_init_locked(sc_if);
 2988                 return;
 2989         }
 2990 
 2991         if_printf(ifp, "watchdog timeout\n");
 2992         ifp->if_oerrors++;
 2993         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2994         msk_init_locked(sc_if);
 2995         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2996                 msk_start_locked(ifp);
 2997 }
 2998 
 2999 static int
 3000 mskc_shutdown(device_t dev)
 3001 {
 3002         struct msk_softc *sc;
 3003         int i;
 3004 
 3005         sc = device_get_softc(dev);
 3006         MSK_LOCK(sc);
 3007         for (i = 0; i < sc->msk_num_port; i++) {
 3008                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3009                     ((sc->msk_if[i]->msk_ifp->if_drv_flags &
 3010                     IFF_DRV_RUNNING) != 0))
 3011                         msk_stop(sc->msk_if[i]);
 3012         }
 3013         MSK_UNLOCK(sc);
 3014 
 3015         /* Put the hardware into reset. */
 3016         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 3017         return (0);
 3018 }
 3019 
 3020 static int
 3021 mskc_suspend(device_t dev)
 3022 {
 3023         struct msk_softc *sc;
 3024         int i;
 3025 
 3026         sc = device_get_softc(dev);
 3027 
 3028         MSK_LOCK(sc);
 3029 
 3030         for (i = 0; i < sc->msk_num_port; i++) {
 3031                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3032                     ((sc->msk_if[i]->msk_ifp->if_drv_flags &
 3033                     IFF_DRV_RUNNING) != 0))
 3034                         msk_stop(sc->msk_if[i]);
 3035         }
 3036 
 3037         /* Disable all interrupts. */
 3038         CSR_WRITE_4(sc, B0_IMSK, 0);
 3039         CSR_READ_4(sc, B0_IMSK);
 3040         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 3041         CSR_READ_4(sc, B0_HWE_IMSK);
 3042 
 3043         msk_phy_power(sc, MSK_PHY_POWERDOWN);
 3044 
 3045         /* Put the hardware into reset. */
 3046         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 3047         sc->msk_pflags |= MSK_FLAG_SUSPEND;
 3048 
 3049         MSK_UNLOCK(sc);
 3050 
 3051         return (0);
 3052 }
 3053 
 3054 static int
 3055 mskc_resume(device_t dev)
 3056 {
 3057         struct msk_softc *sc;
 3058         int i;
 3059 
 3060         sc = device_get_softc(dev);
 3061 
 3062         MSK_LOCK(sc);
 3063 
 3064         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
 3065         mskc_reset(sc);
 3066         for (i = 0; i < sc->msk_num_port; i++) {
 3067                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3068                     ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
 3069                         sc->msk_if[i]->msk_ifp->if_drv_flags &=
 3070                             ~IFF_DRV_RUNNING;
 3071                         msk_init_locked(sc->msk_if[i]);
 3072                 }
 3073         }
 3074         sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
 3075 
 3076         MSK_UNLOCK(sc);
 3077 
 3078         return (0);
 3079 }
 3080 
 3081 #ifndef __NO_STRICT_ALIGNMENT
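      /*
       * On strict-alignment architectures, chips that use the RAM buffer
       * require Rx buffers aligned on MSK_RX_BUF_ALIGN (8 bytes), which
       * leaves the IP header misaligned. Slide the received frame back by
       * MSK_RX_BUF_ALIGN - ETHER_ALIGN (6) bytes, copying in 16-bit words
       * (dst = src - 3), so the payload lands on the usual ETHER_ALIGN
       * boundary.
       */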
 3082 static __inline void
 3083 msk_fixup_rx(struct mbuf *m)
 3084 {
 3085         int i;
 3086         uint16_t *src, *dst;
 3087 
 3088         src = mtod(m, uint16_t *);
 3089         dst = src - 3;
 3090 
 3091         for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
 3092                 *dst++ = *src++;
 3093 
 3094         m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
 3095 }
 3096 #endif
 3097 
 3098 static __inline void
 3099 msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
 3100 {
 3101         struct ether_header *eh;
 3102         struct ip *ip;
 3103         struct udphdr *uh;
 3104         int32_t hlen, len, pktlen, temp32;
 3105         uint16_t csum, *opts;
 3106 
 3107         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
 3108                 if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
 3109                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 3110                         if ((control & CSS_IPV4_CSUM_OK) != 0)
 3111                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 3112                         if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
 3113                             (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
 3114                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
 3115                                     CSUM_PSEUDO_HDR;
 3116                                 m->m_pkthdr.csum_data = 0xffff;
 3117                         }
 3118                 }
 3119                 return;
 3120         }
 3121         /*
 3122          * Marvell Yukon controllers that support OP_RXCHKS are known
 3123          * to have various Rx checksum offloading bugs. These
 3124          * controllers can be configured to compute a simple checksum
 3125          * at two different positions, so IP and TCP/UDP checksums
 3126          * could be computed at the same time. Instead, we intentionally
 3127          * have the controller compute the TCP/UDP checksum twice, by
 3128          * specifying the same start position, and compare the results.
 3129          * If the two values differ, the hardware logic was wrong.
 3130          */
 3131         if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
 3132                 if (bootverbose)
 3133                         device_printf(sc_if->msk_if_dev,
 3134                             "Rx checksum value mismatch!\n");
 3135                 return;
 3136         }
 3137         pktlen = m->m_pkthdr.len;
 3138         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 3139                 return;
 3140         eh = mtod(m, struct ether_header *);
 3141         if (eh->ether_type != htons(ETHERTYPE_IP))
 3142                 return;
 3143         ip = (struct ip *)(eh + 1);
 3144         if (ip->ip_v != IPVERSION)
 3145                 return;
 3146 
 3147         hlen = ip->ip_hl << 2;
 3148         pktlen -= sizeof(struct ether_header);
 3149         if (hlen < sizeof(struct ip))
 3150                 return;
 3151         if (ntohs(ip->ip_len) < hlen)
 3152                 return;
 3153         if (ntohs(ip->ip_len) != pktlen)
 3154                 return;
 3155         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 3156                 return; /* can't handle fragmented packet. */
 3157 
 3158         switch (ip->ip_p) {
 3159         case IPPROTO_TCP:
 3160                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 3161                         return;
 3162                 break;
 3163         case IPPROTO_UDP:
 3164                 if (pktlen < (hlen + sizeof(struct udphdr)))
 3165                         return;
 3166                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 3167                 if (uh->uh_sum == 0)
 3168                         return; /* no checksum */
 3169                 break;
 3170         default:
 3171                 return;
 3172         }
 3173         csum = bswap16(sc_if->msk_csum & 0xFFFF);
 3174         /* Checksum fixup for IP options. */
 3175         len = hlen - sizeof(struct ip);
 3176         if (len > 0) {
 3177                 opts = (uint16_t *)(ip + 1);
 3178                 for (; len > 0; len -= sizeof(uint16_t), opts++) {
 3179                         temp32 = csum - *opts;
 3180                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 3181                         csum = temp32 & 65535;
 3182                 }
 3183         }
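              /*
               * The loop above subtracts each option word from the hardware
               * checksum in one's-complement arithmetic with an end-around
               * carry. Illustrative example: csum = 0x0010, *opts = 0x0020
               * gives temp32 = 0xfffffff0; folding the carry yields 0xffef,
               * i.e. 0x0010 + ~0x0020 in one's complement.
               */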
 3184         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 3185         m->m_pkthdr.csum_data = csum;
 3186 }
 3187 
 3188 static void
 3189 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
 3190     int len)
 3191 {
 3192         struct mbuf *m;
 3193         struct ifnet *ifp;
 3194         struct msk_rxdesc *rxd;
 3195         int cons, rxlen;
 3196 
 3197         ifp = sc_if->msk_ifp;
 3198 
 3199         MSK_IF_LOCK_ASSERT(sc_if);
 3200 
 3201         cons = sc_if->msk_cdata.msk_rx_cons;
 3202         do {
 3203                 rxlen = status >> 16;
 3204                 if ((status & GMR_FS_VLAN) != 0 &&
 3205                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3206                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 3207                 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
 3208                         /*
 3209                          * For controllers that return a bogus status
 3210                          * code, just do a minimal check and let the
 3211                          * upper stack handle the frame.
 3212                          */
 3213                         if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
 3214                                 ifp->if_ierrors++;
 3215                                 msk_discard_rxbuf(sc_if, cons);
 3216                                 break;
 3217                         }
 3218                 } else if (len > sc_if->msk_framesize ||
 3219                     ((status & GMR_FS_ANY_ERR) != 0) ||
 3220                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 3221                         /* Don't count flow-control packet as errors. */
 3222                         if ((status & GMR_FS_GOOD_FC) == 0)
 3223                                 ifp->if_ierrors++;
 3224                         msk_discard_rxbuf(sc_if, cons);
 3225                         break;
 3226                 }
 3227 #ifdef MSK_64BIT_DMA
 3228                 rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) %
 3229                     MSK_RX_RING_CNT];
 3230 #else
 3231                 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
 3232 #endif
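                      /*
                       * With 64-bit DMA each Rx buffer apparently occupies
                       * two ring slots (an OP_ADDR64 LE followed by the
                       * buffer LE), so the mbuf for this status LE lives one
                       * slot past the consumer index.
                       */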
 3233                 m = rxd->rx_m;
 3234                 if (msk_newbuf(sc_if, cons) != 0) {
 3235                         ifp->if_iqdrops++;
 3236                         /* Reuse old buffer. */
 3237                         msk_discard_rxbuf(sc_if, cons);
 3238                         break;
 3239                 }
 3240                 m->m_pkthdr.rcvif = ifp;
 3241                 m->m_pkthdr.len = m->m_len = len;
 3242 #ifndef __NO_STRICT_ALIGNMENT
 3243                 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 3244                         msk_fixup_rx(m);
 3245 #endif
 3246                 ifp->if_ipackets++;
 3247                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 3248                         msk_rxcsum(sc_if, control, m);
 3249                 /* Check for VLAN tagged packets. */
 3250                 if ((status & GMR_FS_VLAN) != 0 &&
 3251                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 3252                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 3253                         m->m_flags |= M_VLANTAG;
 3254                 }
 3255                 MSK_IF_UNLOCK(sc_if);
 3256                 (*ifp->if_input)(ifp, m);
 3257                 MSK_IF_LOCK(sc_if);
 3258         } while (0);
 3259 
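              /*
               * Advance the consumer and producer indices exactly once per
               * status LE; the do { } while (0) above lets the error paths
               * break out early while still consuming the ring slot.
               */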
 3260         MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
 3261         MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
 3262 }
 3263 
 3264 static void
 3265 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
 3266     int len)
 3267 {
 3268         struct mbuf *m;
 3269         struct ifnet *ifp;
 3270         struct msk_rxdesc *jrxd;
 3271         int cons, rxlen;
 3272 
 3273         ifp = sc_if->msk_ifp;
 3274 
 3275         MSK_IF_LOCK_ASSERT(sc_if);
 3276 
 3277         cons = sc_if->msk_cdata.msk_rx_cons;
 3278         do {
 3279                 rxlen = status >> 16;
 3280                 if ((status & GMR_FS_VLAN) != 0 &&
 3281                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3282                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 3283                 if (len > sc_if->msk_framesize ||
 3284                     ((status & GMR_FS_ANY_ERR) != 0) ||
 3285                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 3286                         /* Don't count flow-control packet as errors. */
 3287                         if ((status & GMR_FS_GOOD_FC) == 0)
 3288                                 ifp->if_ierrors++;
 3289                         msk_discard_jumbo_rxbuf(sc_if, cons);
 3290                         break;
 3291                 }
 3292 #ifdef MSK_64BIT_DMA
 3293                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) %
 3294                     MSK_JUMBO_RX_RING_CNT];
 3295 #else
 3296                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
 3297 #endif
 3298                 m = jrxd->rx_m;
 3299                 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
 3300                         ifp->if_iqdrops++;
 3301                         /* Reuse old buffer. */
 3302                         msk_discard_jumbo_rxbuf(sc_if, cons);
 3303                         break;
 3304                 }
 3305                 m->m_pkthdr.rcvif = ifp;
 3306                 m->m_pkthdr.len = m->m_len = len;
 3307 #ifndef __NO_STRICT_ALIGNMENT
 3308                 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 3309                         msk_fixup_rx(m);
 3310 #endif
 3311                 ifp->if_ipackets++;
 3312                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 3313                         msk_rxcsum(sc_if, control, m);
 3314                 /* Check for VLAN tagged packets. */
 3315                 if ((status & GMR_FS_VLAN) != 0 &&
 3316                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 3317                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 3318                         m->m_flags |= M_VLANTAG;
 3319                 }
 3320                 MSK_IF_UNLOCK(sc_if);
 3321                 (*ifp->if_input)(ifp, m);
 3322                 MSK_IF_LOCK(sc_if);
 3323         } while (0);
 3324 
 3325         MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
 3326         MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
 3327 }
 3328 
 3329 static void
 3330 msk_txeof(struct msk_if_softc *sc_if, int idx)
 3331 {
 3332         struct msk_txdesc *txd;
 3333         struct msk_tx_desc *cur_tx;
 3334         struct ifnet *ifp;
 3335         uint32_t control;
 3336         int cons, prog;
 3337 
 3338         MSK_IF_LOCK_ASSERT(sc_if);
 3339 
 3340         ifp = sc_if->msk_ifp;
 3341 
 3342         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
 3343             sc_if->msk_cdata.msk_tx_ring_map,
 3344             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3345         /*
 3346          * Go through our tx ring and free mbufs for those
 3347          * frames that have been sent.
 3348          */
 3349         cons = sc_if->msk_cdata.msk_tx_cons;
 3350         prog = 0;
 3351         for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
 3352                 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
 3353                         break;
 3354                 prog++;
 3355                 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
 3356                 control = le32toh(cur_tx->msk_control);
 3357                 sc_if->msk_cdata.msk_tx_cnt--;
 3358                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3359                 if ((control & EOP) == 0)
 3360                         continue;
 3361                 txd = &sc_if->msk_cdata.msk_txdesc[cons];
 3362                 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
 3363                     BUS_DMASYNC_POSTWRITE);
 3364                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
 3365 
 3366                 ifp->if_opackets++;
 3367                 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
 3368                     __func__));
 3369                 m_freem(txd->tx_m);
 3370                 txd->tx_m = NULL;
 3371         }
 3372 
 3373         if (prog > 0) {
 3374                 sc_if->msk_cdata.msk_tx_cons = cons;
 3375                 if (sc_if->msk_cdata.msk_tx_cnt == 0)
 3376                         sc_if->msk_watchdog_timer = 0;
 3377                 /* No need to sync LEs as we didn't update them. */
 3378         }
 3379 }
 3380 
 3381 static void
 3382 msk_tick(void *xsc_if)
 3383 {
 3384         struct msk_if_softc *sc_if;
 3385         struct mii_data *mii;
 3386 
 3387         sc_if = xsc_if;
 3388 
 3389         MSK_IF_LOCK_ASSERT(sc_if);
 3390 
 3391         mii = device_get_softc(sc_if->msk_miibus);
 3392 
 3393         mii_tick(mii);
 3394         if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
 3395                 msk_miibus_statchg(sc_if->msk_if_dev);
 3396         msk_handle_events(sc_if->msk_softc);
 3397         msk_watchdog(sc_if);
 3398         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 3399 }
 3400 
 3401 static void
 3402 msk_intr_phy(struct msk_if_softc *sc_if)
 3403 {
 3404         uint16_t status;
 3405 
 3406         msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 3407         status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 3408         /* Handle FIFO Underrun/Overflow? */
 3409         if ((status & PHY_M_IS_FIFO_ERROR))
 3410                 device_printf(sc_if->msk_if_dev,
 3411                     "PHY FIFO underrun/overflow.\n");
 3412 }
 3413 
 3414 static void
 3415 msk_intr_gmac(struct msk_if_softc *sc_if)
 3416 {
 3417         struct msk_softc *sc;
 3418         uint8_t status;
 3419 
 3420         sc = sc_if->msk_softc;
 3421         status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3422 
 3423         /* GMAC Rx FIFO overrun. */
 3424         if ((status & GM_IS_RX_FF_OR) != 0)
 3425                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 3426                     GMF_CLI_RX_FO);
 3427         /* GMAC Tx FIFO underrun. */
 3428         if ((status & GM_IS_TX_FF_UR) != 0) {
 3429                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3430                     GMF_CLI_TX_FU);
 3431                 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
 3432                 /*
 3433                  * XXX
 3434                  * In case of a Tx underrun we may need to flush/reset
 3435                  * the Tx MAC, but that would also require
 3436                  * resynchronization with the status LEs. Reinitializing
 3437                  * the status LEs would affect the other port in a
 3438                  * dual-MAC configuration, so it should be avoided as
 3439                  * much as possible. For lack of documentation this is
 3440                  * all vague guesswork; it needs more investigation.
 3441                  */
 3442         }
 3443 }
 3444 
 3445 static void
 3446 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
 3447 {
 3448         struct msk_softc *sc;
 3449 
 3450         sc = sc_if->msk_softc;
 3451         if ((status & Y2_IS_PAR_RD1) != 0) {
 3452                 device_printf(sc_if->msk_if_dev,
 3453                     "RAM buffer read parity error\n");
 3454                 /* Clear IRQ. */
 3455                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3456                     RI_CLR_RD_PERR);
 3457         }
 3458         if ((status & Y2_IS_PAR_WR1) != 0) {
 3459                 device_printf(sc_if->msk_if_dev,
 3460                     "RAM buffer write parity error\n");
 3461                 /* Clear IRQ. */
 3462                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3463                     RI_CLR_WR_PERR);
 3464         }
 3465         if ((status & Y2_IS_PAR_MAC1) != 0) {
 3466                 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
 3467                 /* Clear IRQ. */
 3468                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3469                     GMF_CLI_TX_PE);
 3470         }
 3471         if ((status & Y2_IS_PAR_RX1) != 0) {
 3472                 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
 3473                 /* Clear IRQ. */
 3474                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
 3475         }
 3476         if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
 3477                 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
 3478                 /* Clear IRQ. */
 3479                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
 3480         }
 3481 }
 3482 
 3483 static void
 3484 msk_intr_hwerr(struct msk_softc *sc)
 3485 {
 3486         uint32_t status;
 3487         uint32_t tlphead[4];
 3488 
 3489         status = CSR_READ_4(sc, B0_HWE_ISRC);
 3490         /* Time Stamp timer overflow. */
 3491         if ((status & Y2_IS_TIST_OV) != 0)
 3492                 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 3493         if ((status & Y2_IS_PCI_NEXP) != 0) {
 3494                 /*
 3495                  * A PCI Express error occurred that is not described
 3496                  * in the PEX spec.
 3497                  * This error is also mapped to either the Master Abort
 3498                  * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit
 3499                  * and can only be cleared there.
 3500                  */
 3501                 device_printf(sc->msk_dev,
 3502                     "PCI Express protocol violation error\n");
 3503         }
 3504 
 3505         if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
 3506                 uint16_t v16;
 3507 
 3508                 if ((status & Y2_IS_MST_ERR) != 0)
 3509                         device_printf(sc->msk_dev,
 3510                             "unexpected IRQ Master error\n");
 3511                 else
 3512                         device_printf(sc->msk_dev,
 3513                             "unexpected IRQ Status error\n");
 3514                 /* Reset all bits in the PCI status register. */
 3515                 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 3516                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3517                 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
 3518                     PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 3519                     PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
 3520                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3521         }
 3522 
 3523         /* Check for PCI Express Uncorrectable Error. */
 3524         if ((status & Y2_IS_PCI_EXP) != 0) {
 3525                 uint32_t v32;
 3526 
 3527                 /*
 3528                  * On a PCI Express bus, bridges are called root
 3529                  * complexes (RC). PCI Express errors are recognized by
 3530                  * the root complex as well, which asks the system to
 3531                  * handle the problem. After such an error occurs, it may
 3532                  * be that no further access to the adapter is possible.
 3533                  */
 3534 
 3535                 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 3536                 if ((v32 & PEX_UNSUP_REQ) != 0) {
 3537                         /* Log, but otherwise ignore, unsupported request errors. */
 3538                         device_printf(sc->msk_dev,
 3539                             "Uncorrectable PCI Express error\n");
 3540                 }
 3541                 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
 3542                         int i;
 3543 
 3544                         /* Get TLP header from the Log Registers. */
 3545                         for (i = 0; i < 4; i++)
 3546                                 tlphead[i] = CSR_PCI_READ_4(sc,
 3547                                     PEX_HEADER_LOG + i * 4);
 3548                         /* Check for vendor defined broadcast message. */
 3549                         if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
 3550                                 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 3551                                 CSR_WRITE_4(sc, B0_HWE_IMSK,
 3552                                     sc->msk_intrhwemask);
 3553                                 CSR_READ_4(sc, B0_HWE_IMSK);
 3554                         }
 3555                 }
 3556                 /* Clear the interrupt. */
 3557                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3558                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 3559                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3560         }
 3561 
 3562         if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
 3563                 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
 3564         if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
 3565                 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
 3566 }
 3567 
 3568 static __inline void
 3569 msk_rxput(struct msk_if_softc *sc_if)
 3570 {
 3571         struct msk_softc *sc;
 3572 
 3573         sc = sc_if->msk_softc;
 3574         if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
 3575                 bus_dmamap_sync(
 3576                     sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 3577                     sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 3578                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3579         else
 3580                 bus_dmamap_sync(
 3581                     sc_if->msk_cdata.msk_rx_ring_tag,
 3582                     sc_if->msk_cdata.msk_rx_ring_map,
 3583                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3584         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
 3585             PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
 3586 }
 3587 
 3588 static int
 3589 msk_handle_events(struct msk_softc *sc)
 3590 {
 3591         struct msk_if_softc *sc_if;
 3592         int rxput[2];
 3593         struct msk_stat_desc *sd;
 3594         uint32_t control, status;
 3595         int cons, len, port, rxprog;
 3596 
 3597         if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
 3598                 return (0);
 3599 
 3600         /* Sync status LEs. */
 3601         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 3602             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3603 
 3604         rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
 3605         rxprog = 0;
 3606         cons = sc->msk_stat_cons;
 3607         for (;;) {
 3608                 sd = &sc->msk_stat_ring[cons];
 3609                 control = le32toh(sd->msk_control);
 3610                 if ((control & HW_OWNER) == 0)
 3611                         break;
 3612                 control &= ~HW_OWNER;
 3613                 sd->msk_control = htole32(control);
 3614                 status = le32toh(sd->msk_status);
 3615                 len = control & STLE_LEN_MASK;
 3616                 port = (control >> 16) & 0x01;
 3617                 sc_if = sc->msk_if[port];
 3618                 if (sc_if == NULL) {
 3619                         device_printf(sc->msk_dev, "invalid port opcode "
 3620                             "0x%08x\n", control & STLE_OP_MASK);
 3621                         continue;
 3622                 }
 3623 
 3624                 switch (control & STLE_OP_MASK) {
 3625                 case OP_RXVLAN:
 3626                         sc_if->msk_vtag = ntohs(len);
 3627                         break;
 3628                 case OP_RXCHKSVLAN:
 3629                         sc_if->msk_vtag = ntohs(len);
 3630                         /* FALLTHROUGH */
 3631                 case OP_RXCHKS:
 3632                         sc_if->msk_csum = status;
 3633                         break;
 3634                 case OP_RXSTAT:
 3635                         if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
 3636                                 break;
 3637                         if (sc_if->msk_framesize >
 3638                             (MCLBYTES - MSK_RX_BUF_ALIGN))
 3639                                 msk_jumbo_rxeof(sc_if, status, control, len);
 3640                         else
 3641                                 msk_rxeof(sc_if, status, control, len);
 3642                         rxprog++;
 3643                         /*
 3644                          * Because there is no way to sync a single Rx
 3645                          * LE, put the DMA sync operation off until the
 3646                          * end of event processing.
 3647                          */
 3648                         rxput[port]++;
 3649                         /* Update prefetch unit if we've passed the watermark. */
 3650                         if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
 3651                                 msk_rxput(sc_if);
 3652                                 rxput[port] = 0;
 3653                         }
 3654                         break;
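                      /*
                       * An OP_TXINDEXLE reports completed Tx indices for
                       * both ports in a single LE: port A's index sits in
                       * the low bits of status, while port B's is split
                       * between the high bits of status and the low bits of
                       * len, as the masks and shifts below spell out.
                       */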
 3655                 case OP_TXINDEXLE:
 3656                         if (sc->msk_if[MSK_PORT_A] != NULL)
 3657                                 msk_txeof(sc->msk_if[MSK_PORT_A],
 3658                                     status & STLE_TXA1_MSKL);
 3659                         if (sc->msk_if[MSK_PORT_B] != NULL)
 3660                                 msk_txeof(sc->msk_if[MSK_PORT_B],
 3661                                     ((status & STLE_TXA2_MSKL) >>
 3662                                     STLE_TXA2_SHIFTL) |
 3663                                     ((len & STLE_TXA2_MSKH) <<
 3664                                     STLE_TXA2_SHIFTH));
 3665                         break;
 3666                 default:
 3667                         device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
 3668                             control & STLE_OP_MASK);
 3669                         break;
 3670                 }
 3671                 MSK_INC(cons, sc->msk_stat_count);
 3672                 if (rxprog > sc->msk_process_limit)
 3673                         break;
 3674         }
 3675 
 3676         sc->msk_stat_cons = cons;
 3677         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 3678             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3679 
 3680         if (rxput[MSK_PORT_A] > 0)
 3681                 msk_rxput(sc->msk_if[MSK_PORT_A]);
 3682         if (rxput[MSK_PORT_B] > 0)
 3683                 msk_rxput(sc->msk_if[MSK_PORT_B]);
 3684 
 3685         return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
 3686 }
 3687 
 3688 static void
 3689 msk_intr(void *xsc)
 3690 {
 3691         struct msk_softc *sc;
 3692         struct msk_if_softc *sc_if0, *sc_if1;
 3693         struct ifnet *ifp0, *ifp1;
 3694         uint32_t status;
 3695         int domore;
 3696 
 3697         sc = xsc;
 3698         MSK_LOCK(sc);
 3699 
 3700         /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
 3701         status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
 3702         if (status == 0 || status == 0xffffffff ||
 3703             (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
 3704             (status & sc->msk_intrmask) == 0) {
 3705                 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3706                 MSK_UNLOCK(sc);
 3707                 return;
 3708         }
 3709 
 3710         sc_if0 = sc->msk_if[MSK_PORT_A];
 3711         sc_if1 = sc->msk_if[MSK_PORT_B];
 3712         ifp0 = ifp1 = NULL;
 3713         if (sc_if0 != NULL)
 3714                 ifp0 = sc_if0->msk_ifp;
 3715         if (sc_if1 != NULL)
 3716                 ifp1 = sc_if1->msk_ifp;
 3717 
 3718         if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
 3719                 msk_intr_phy(sc_if0);
 3720         if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
 3721                 msk_intr_phy(sc_if1);
 3722         if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
 3723                 msk_intr_gmac(sc_if0);
 3724         if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
 3725                 msk_intr_gmac(sc_if1);
 3726         if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
 3727                 device_printf(sc->msk_dev, "Rx descriptor error\n");
 3728                 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
 3729                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3730                 CSR_READ_4(sc, B0_IMSK);
 3731         }
 3732         if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
 3733                 device_printf(sc->msk_dev, "Tx descriptor error\n");
 3734                 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
 3735                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3736                 CSR_READ_4(sc, B0_IMSK);
 3737         }
 3738         if ((status & Y2_IS_HW_ERR) != 0)
 3739                 msk_intr_hwerr(sc);
 3740 
 3741         domore = msk_handle_events(sc);
 3742         if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
 3743                 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
 3744 
 3745         /* Reenable interrupts. */
 3746         CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3747 
 3748         if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3749             !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
 3750                 msk_start_locked(ifp0);
 3751         if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3752             !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
 3753                 msk_start_locked(ifp1);
 3754 
 3755         MSK_UNLOCK(sc);
 3756 }
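
/*
 * Editor's note: msk_intr() relies on a read-to-mask/write-to-unmask
 * handshake: reading B0_Y2_SP_ISRC2 masks further interrupts until the
 * driver writes 2 to B0_Y2_SP_ICR.  A minimal sketch of that handshake
 * as a helper pair, with hypothetical names:
 */

static __inline uint32_t
msk_isr_claim(struct msk_softc *sc)
{

	/* Reading the status masks the interrupt line as a side effect. */
	return (CSR_READ_4(sc, B0_Y2_SP_ISRC2));
}

static __inline void
msk_isr_release(struct msk_softc *sc)
{

	/* Writing 2 re-arms the interrupt, as in msk_intr() above. */
	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
}
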
 3757 
 3758 static void
 3759 msk_set_tx_stfwd(struct msk_if_softc *sc_if)
 3760 {
 3761         struct msk_softc *sc;
 3762         struct ifnet *ifp;
 3763 
 3764         ifp = sc_if->msk_ifp;
 3765         sc = sc_if->msk_softc;
 3766         if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
 3767             sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
 3768             sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
 3769                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3770                     TX_STFW_ENA);
 3771         } else {
 3772                 if (ifp->if_mtu > ETHERMTU) {
 3773                         /* Set Tx GMAC FIFO Almost Empty Threshold. */
 3774                         CSR_WRITE_4(sc,
 3775                             MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
 3776                             MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
 3777                         /* Disable Store & Forward mode for Tx. */
 3778                         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3779                             TX_STFW_DIS);
 3780                 } else {
 3781                         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3782                             TX_STFW_ENA);
 3783                 }
 3784         }
 3785 }
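
/*
 * Editor's note: the branches above reduce to a predicate on chip
 * identity and MTU: newer MACs (Yukon Extreme past rev A0, and Yukon
 * Supreme onward) can store-and-forward even jumbo frames, while older
 * ones must cut through when the MTU exceeds ETHERMTU.  A sketch of the
 * same decision as a pure function, with a hypothetical name:
 */

static __inline int
msk_wants_tx_stfwd(int hw_id, int hw_rev, int mtu)
{

	if ((hw_id == CHIP_ID_YUKON_EX && hw_rev != CHIP_REV_YU_EX_A0) ||
	    hw_id >= CHIP_ID_YUKON_SUPR)
		return (1);		/* Store-and-forward always safe. */
	return (mtu <= ETHERMTU);	/* Jumbo frames need cut-through. */
}
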
 3786 
 3787 static void
 3788 msk_init(void *xsc)
 3789 {
 3790         struct msk_if_softc *sc_if = xsc;
 3791 
 3792         MSK_IF_LOCK(sc_if);
 3793         msk_init_locked(sc_if);
 3794         MSK_IF_UNLOCK(sc_if);
 3795 }
 3796 
 3797 static void
 3798 msk_init_locked(struct msk_if_softc *sc_if)
 3799 {
 3800         struct msk_softc *sc;
 3801         struct ifnet *ifp;
 3802         struct mii_data  *mii;
 3803         uint8_t *eaddr;
 3804         uint16_t gmac;
 3805         uint32_t reg;
 3806         int error;
 3807 
 3808         MSK_IF_LOCK_ASSERT(sc_if);
 3809 
 3810         ifp = sc_if->msk_ifp;
 3811         sc = sc_if->msk_softc;
 3812         mii = device_get_softc(sc_if->msk_miibus);
 3813 
 3814         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 3815                 return;
 3816 
 3817         error = 0;
 3818         /* Cancel pending I/O and free all Rx/Tx buffers. */
 3819         msk_stop(sc_if);
 3820 
 3821         if (ifp->if_mtu < ETHERMTU)
 3822                 sc_if->msk_framesize = ETHERMTU;
 3823         else
 3824                 sc_if->msk_framesize = ifp->if_mtu;
 3825         sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 3826         if (ifp->if_mtu > ETHERMTU &&
 3827             (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
 3828                 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
 3829                 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
 3830         }
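
	/*
	 * Worked example for the frame size computed above: with the
	 * default MTU of 1500 (ETHERMTU), msk_framesize becomes
	 * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_VLAN_ENCAP_LEN) = 1518,
	 * i.e. room for a maximum-size VLAN-tagged frame (without FCS).
	 */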
 3831 
 3832         /* GMAC Control reset. */
 3833         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
 3834         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
 3835         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
 3836         if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 3837             sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
 3838                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
 3839                     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 3840                     GMC_BYP_RETR_ON);
 3841 
 3842         /*
 3843          * Initialize GMAC first such that speed/duplex/flow-control
 3844          * parameters are renegotiated when the interface is brought up.
 3845          */
 3846         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
 3847 
 3848         /* Dummy read of the Interrupt Source Register. */
 3849         CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3850 
 3851         /* Clear MIB stats. */
 3852         msk_stats_clear(sc_if);
 3853 
 3854         /* Disable FCS. */
 3855         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
 3856 
 3857         /* Setup Transmit Control Register. */
 3858         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
 3859 
 3860         /* Setup Transmit Flow Control Register. */
 3861         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
 3862 
 3863         /* Setup Transmit Parameter Register. */
 3864         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
 3865             TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
 3866             TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
 3867 
 3868         gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
 3869             GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
 3870 
 3871         if (ifp->if_mtu > ETHERMTU)
 3872                 gmac |= GM_SMOD_JUMBO_ENA;
 3873         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
 3874 
 3875         /* Set station address. */
 3876         eaddr = IF_LLADDR(ifp);
 3877         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
 3878             eaddr[0] | (eaddr[1] << 8));
 3879         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
 3880             eaddr[2] | (eaddr[3] << 8));
 3881         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
 3882             eaddr[4] | (eaddr[5] << 8));
 3883         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
 3884             eaddr[0] | (eaddr[1] << 8));
 3885         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
 3886             eaddr[2] | (eaddr[3] << 8));
 3887         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
 3888             eaddr[4] | (eaddr[5] << 8));
 3889 
 3890         /* Disable interrupts for counter overflows. */
 3891         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
 3892         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
 3893         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
 3894 
 3895         /* Configure Rx MAC FIFO. */
 3896         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 3897         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
 3898         reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
 3899         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
 3900             sc->msk_hw_id == CHIP_ID_YUKON_EX)
 3901                 reg |= GMF_RX_OVER_ON;
 3902         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
 3903 
 3904         /* Set receive filter. */
 3905         msk_rxfilter(sc_if);
 3906 
 3907         if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
 3908                 /* Clear flush mask to work around a HW bug. */
 3909                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
 3910         } else {
 3911                 /* Flush Rx MAC FIFO on any flow control or error. */
 3912                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
 3913                     GMR_FS_ANY_ERR);
 3914         }
 3915 
 3916         /*
 3917          * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
 3918          * to work around a hardware hang on receipt of pause frames.
 3919          */
 3920         reg = RX_GMF_FL_THR_DEF + 1;
 3921         /* Another magic value for Yukon FE+, taken from Linux. */
 3922         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3923             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
 3924                 reg = 0x178;
 3925         CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
 3926 
 3927         /* Configure Tx MAC FIFO. */
 3928         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 3929         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
 3930         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
 3931 
 3932         /* Configure hardware VLAN tag insertion/stripping. */
 3933         msk_setvlan(sc_if, ifp);
 3934 
 3935         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
 3936                 /* Set Rx pause thresholds. */
 3937                 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
 3938                     MSK_ECU_LLPP);
 3939                 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
 3940                     MSK_ECU_ULPP);
 3941                 /* Configure store-and-forward for Tx. */
 3942                 msk_set_tx_stfwd(sc_if);
 3943         }
 3944 
 3945         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3946             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
 3947                 /* Disable dynamic watermark, as Linux does. */
 3948                 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
 3949                 reg &= ~0x03;
 3950                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
 3951         }
 3952 
 3953         /*
 3954          * Disable Force Sync bit and Alloc bit in Tx RAM interface
 3955          * arbiter, as we don't use the Sync Tx queue.
 3956          */
 3957         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
 3958             TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
 3959         /* Enable the RAM Interface Arbiter. */
 3960         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
 3961 
 3962         /* Setup RAM buffer. */
 3963         msk_set_rambuffer(sc_if);
 3964 
 3965         /* Disable Tx sync Queue. */
 3966         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
 3967 
 3968         /* Setup Tx Queue Bus Memory Interface. */
 3969         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
 3970         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
 3971         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
 3972         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
 3973         switch (sc->msk_hw_id) {
 3974         case CHIP_ID_YUKON_EC_U:
 3975                 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
 3976                         /* Fix for Yukon-EC Ultra: set BMU FIFO level. */
 3977                         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
 3978                             MSK_ECU_TXFF_LEV);
 3979                 }
 3980                 break;
 3981         case CHIP_ID_YUKON_EX:
 3982                 /*
 3983                  * Yukon Extreme seems to have a silicon bug in its
 3984                  * automatic Tx checksum calculation.
 3985                  */
 3986                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
 3987                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
 3988                             F_TX_CHK_AUTO_OFF);
 3989                 break;
 3990         }
 3991 
 3992         /* Setup Rx Queue Bus Memory Interface. */
 3993         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
 3994         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
 3995         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
 3996         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
 3997         if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
 3998             sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
 3999                 /* MAC Rx RAM Read is controlled by hardware. */
 4000                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
 4001         }
 4002 
 4003         msk_set_prefetch(sc, sc_if->msk_txq,
 4004             sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
 4005         msk_init_tx_ring(sc_if);
 4006 
 4007         /* Disable RSS hash; enable Rx checksum offload only when usable. */
 4008         reg = BMU_DIS_RX_RSS_HASH;
 4009         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 4010             (ifp->if_capenable & IFCAP_RXCSUM) != 0)
 4011                 reg |= BMU_ENA_RX_CHKSUM;
 4012         else
 4013                 reg |= BMU_DIS_RX_CHKSUM;
 4014         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);
 4015         if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
 4016                 msk_set_prefetch(sc, sc_if->msk_rxq,
 4017                     sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
 4018                     MSK_JUMBO_RX_RING_CNT - 1);
 4019                 error = msk_init_jumbo_rx_ring(sc_if);
 4020         } else {
 4021                 msk_set_prefetch(sc, sc_if->msk_rxq,
 4022                     sc_if->msk_rdata.msk_rx_ring_paddr,
 4023                     MSK_RX_RING_CNT - 1);
 4024                 error = msk_init_rx_ring(sc_if);
 4025         }
 4026         if (error != 0) {
 4027                 device_printf(sc_if->msk_if_dev,
 4028                     "initialization failed: no memory for Rx buffers\n");
 4029                 msk_stop(sc_if);
 4030                 return;
 4031         }
 4032         if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 4033             sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 4034                 /* Disable flushing of non-ASF packets. */
 4035                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 4036                     GMF_RX_MACSEC_FLUSH_OFF);
 4037         }
 4038 
 4039         /* Configure interrupt handling. */
 4040         if (sc_if->msk_port == MSK_PORT_A) {
 4041                 sc->msk_intrmask |= Y2_IS_PORT_A;
 4042                 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
 4043         } else {
 4044                 sc->msk_intrmask |= Y2_IS_PORT_B;
 4045                 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
 4046         }
 4047         /* Configure IRQ moderation mask. */
 4048         CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
 4049         if (sc->msk_int_holdoff > 0) {
 4050                 /* Configure initial IRQ moderation timer value. */
 4051                 CSR_WRITE_4(sc, B2_IRQM_INI,
 4052                     MSK_USECS(sc, sc->msk_int_holdoff));
 4053                 CSR_WRITE_4(sc, B2_IRQM_VAL,
 4054                     MSK_USECS(sc, sc->msk_int_holdoff));
 4055                 /* Start IRQ moderation. */
 4056                 CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
 4057         }
 4058         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 4059         CSR_READ_4(sc, B0_HWE_IMSK);
 4060         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 4061         CSR_READ_4(sc, B0_IMSK);
 4062 
 4063         sc_if->msk_flags &= ~MSK_FLAG_LINK;
 4064         mii_mediachg(mii);
 4065 
 4066         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 4067         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 4068 
 4069         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 4070 }
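
/*
 * Editor's note: the Tx and Rx BMU bring-up sequences in msk_init_locked()
 * repeat the same four register writes.  A hedged refactor sketch of that
 * shared pattern, using a hypothetical helper name (shown for clarity,
 * not as the driver's actual API):
 */

static void
msk_bmu_start(struct msk_softc *sc, int qaddr, uint16_t wm)
{

	CSR_WRITE_4(sc, Q_ADDR(qaddr, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(qaddr, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(qaddr, Q_CSR), BMU_FIFO_OP_ON);
	/* Program the queue watermark last. */
	CSR_WRITE_2(sc, Q_ADDR(qaddr, Q_WM), wm);
}
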
 4071 
 4072 static void
 4073 msk_set_rambuffer(struct msk_if_softc *sc_if)
 4074 {
 4075         struct msk_softc *sc;
 4076         int ltpp, utpp;
 4077 
 4078         sc = sc_if->msk_softc;
 4079         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
 4080                 return;
 4081 
 4082         /* Setup Rx Queue. */
 4083         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
 4084         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
 4085             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4086         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
 4087             sc->msk_rxqend[sc_if->msk_port] / 8);
 4088         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
 4089             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4090         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
 4091             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4092 
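	/*
	 * The divisions by 8 above and below reflect that the RAM
	 * buffer registers are programmed in 8-byte units rather than
	 * in bytes.
	 */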
 4093         utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 4094             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
 4095         ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 4096             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
 4097         if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
 4098                 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
 4099         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
 4100         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
 4101         /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
 4102 
 4103         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
 4104         CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
 4105 
 4106         /* Setup Tx Queue. */
 4107         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
 4108         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
 4109             sc->msk_txqstart[sc_if->msk_port] / 8);
 4110         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
 4111             sc->msk_txqend[sc_if->msk_port] / 8);
 4112         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
 4113             sc->msk_txqstart[sc_if->msk_port] / 8);
 4114         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
 4115             sc->msk_txqstart[sc_if->msk_port] / 8);
 4116         /* Enable Store & Forward for Tx side. */
 4117         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
 4118         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
 4119         CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
 4120 }
 4121 
 4122 static void
 4123 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
 4124     uint32_t count)
 4125 {
 4126 
 4127         /* Reset the prefetch unit. */
 4128         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4129             PREF_UNIT_RST_SET);
 4130         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4131             PREF_UNIT_RST_CLR);
 4132         /* Set LE base address. */
 4133         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
 4134             MSK_ADDR_LO(addr));
 4135         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
 4136             MSK_ADDR_HI(addr));
 4137         /* Set the last index of the list. */
 4138         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
 4139             count);
 4140         /* Turn on prefetch unit. */
 4141         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4142             PREF_UNIT_OP_ON);
 4143         /* Dummy read to flush the preceding writes. */
 4144         CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
 4145 }
 4146 
 4147 static void
 4148 msk_stop(struct msk_if_softc *sc_if)
 4149 {
 4150         struct msk_softc *sc;
 4151         struct msk_txdesc *txd;
 4152         struct msk_rxdesc *rxd;
 4153         struct msk_rxdesc *jrxd;
 4154         struct ifnet *ifp;
 4155         uint32_t val;
 4156         int i;
 4157 
 4158         MSK_IF_LOCK_ASSERT(sc_if);
 4159         sc = sc_if->msk_softc;
 4160         ifp = sc_if->msk_ifp;
 4161 
 4162         callout_stop(&sc_if->msk_tick_ch);
 4163         sc_if->msk_watchdog_timer = 0;
 4164 
 4165         /* Disable interrupts. */
 4166         if (sc_if->msk_port == MSK_PORT_A) {
 4167                 sc->msk_intrmask &= ~Y2_IS_PORT_A;
 4168                 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
 4169         } else {
 4170                 sc->msk_intrmask &= ~Y2_IS_PORT_B;
 4171                 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
 4172         }
 4173         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 4174         CSR_READ_4(sc, B0_HWE_IMSK);
 4175         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 4176         CSR_READ_4(sc, B0_IMSK);
 4177 
 4178         /* Disable Tx/Rx MAC. */
 4179         val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 4180         val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
 4181         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
 4182         /* Read back to ensure the write completed. */
 4183         GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 4184         /* Update stats and clear counters. */
 4185         msk_stats_update(sc_if);
 4186 
 4187         /* Stop Tx BMU. */
 4188         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
 4189         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 4190         for (i = 0; i < MSK_TIMEOUT; i++) {
 4191                 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
 4192                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 4193                             BMU_STOP);
 4194                         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 4195                 } else
 4196                         break;
 4197                 DELAY(1);
 4198         }
 4199         if (i == MSK_TIMEOUT)
 4200                 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
 4201         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
 4202             RB_RST_SET | RB_DIS_OP_MD);
 4203 
 4204         /* Disable all GMAC interrupts. */
 4205         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
 4206         /* Disable PHY interrupt. */
 4207         msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
 4208 
 4209         /* Disable the RAM Interface Arbiter. */
 4210         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
 4211 
 4212         /* Reset the PCI FIFO of the async Tx queue. */
 4213         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 4214             BMU_RST_SET | BMU_FIFO_RST);
 4215 
 4216         /* Reset the Tx prefetch unit. */
 4217         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
 4218             PREF_UNIT_RST_SET);
 4219 
 4220         /* Reset the RAM Buffer async Tx queue. */
 4221         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
 4222 
 4223         /* Reset Tx MAC FIFO. */
 4224         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 4225         /* Set Pause Off. */
 4226         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
 4227 
 4228         /*
 4229          * The Rx Stop command does not work on Yukon-2 unless the BMU has
 4230          * reached the end of a packet, and since we cannot guarantee that
 4231          * no data is incoming, the BMU must be reset while it is not in
 4232          * the middle of a DMA transfer.  Because the Rx path may still be
 4233          * active, the Rx RAM buffer is stopped first so that any incoming
 4234          * data cannot trigger a DMA.  Once the RAM buffer is stopped, the
 4235          * BMU is polled until any DMA in progress has ended, and only
 4236          * then is it reset.
 4237          */
 4238 
 4239         /* Disable the RAM Buffer receive queue. */
 4240         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
 4241         for (i = 0; i < MSK_TIMEOUT; i++) {
 4242                 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
 4243                     CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
 4244                         break;
 4245                 DELAY(1);
 4246         }
 4247         if (i == MSK_TIMEOUT)
 4248                 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
 4249         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
 4250             BMU_RST_SET | BMU_FIFO_RST);
 4251         /* Reset the Rx prefetch unit. */
 4252         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
 4253             PREF_UNIT_RST_SET);
 4254         /* Reset the RAM Buffer receive queue. */
 4255         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
 4256         /* Reset Rx MAC FIFO. */
 4257         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 4258 
 4259         /* Free Rx and Tx mbufs still in the queues. */
 4260         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 4261                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 4262                 if (rxd->rx_m != NULL) {
 4263                         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
 4264                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 4265                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
 4266                             rxd->rx_dmamap);
 4267                         m_freem(rxd->rx_m);
 4268                         rxd->rx_m = NULL;
 4269                 }
 4270         }
 4271         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 4272                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 4273                 if (jrxd->rx_m != NULL) {
 4274                         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
 4275                             jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 4276                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
 4277                             jrxd->rx_dmamap);
 4278                         m_freem(jrxd->rx_m);
 4279                         jrxd->rx_m = NULL;
 4280                 }
 4281         }
 4282         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 4283                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 4284                 if (txd->tx_m != NULL) {
 4285                         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
 4286                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 4287                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
 4288                             txd->tx_dmamap);
 4289                         m_freem(txd->tx_m);
 4290                         txd->tx_m = NULL;
 4291                 }
 4292         }
 4293 
 4294         /*
 4295          * Mark the interface down.
 4296          */
 4297         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 4298         sc_if->msk_flags &= ~MSK_FLAG_LINK;
 4299 }
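
/*
 * Editor's note: the Tx and Rx BMU shutdowns above poll with open-coded
 * DELAY(1) loops.  A sketch of the Tx-side pattern factored into a helper
 * with a hypothetical name, returning an errno-style result instead of
 * printing:
 */

static int
msk_wait_bmu_stopped(struct msk_softc *sc, int qaddr)
{
	uint32_t val;
	int i;

	for (i = 0; i < MSK_TIMEOUT; i++) {
		val = CSR_READ_4(sc, Q_ADDR(qaddr, Q_CSR));
		if ((val & (BMU_STOP | BMU_IDLE)) != 0)
			return (0);	/* BMU reached a stopped state. */
		/* Still busy; reissue the stop command and retry. */
		CSR_WRITE_4(sc, Q_ADDR(qaddr, Q_CSR), BMU_STOP);
		DELAY(1);
	}
	return (ETIMEDOUT);
}
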
 4300 
 4301 /*
 4302  * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the
 4303  * lower 16 bits of a counter clears its high 16 bits, so the lower
 4304  * 16 bits must be read last.
 4305  */
 4306 #define MSK_READ_MIB32(x, y)                                    \
 4307         (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +       \
 4308         (uint32_t)GMAC_READ_2(sc, x, y)
 4309 #define MSK_READ_MIB64(x, y)                                    \
 4310         (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +        \
 4311         (uint64_t)MSK_READ_MIB32(x, y)
 4312 
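/*
 * Editor's note: one caveat is that C does not define an evaluation order
 * for the two GMAC_READ_2() operands joined by "+" in the macros above,
 * so the required high-then-low read order is not actually guaranteed by
 * the language.  A sketch that sequences the reads explicitly, with a
 * hypothetical name:
 */

static uint32_t
msk_read_mib32(struct msk_softc *sc, int port, int reg)
{
	uint32_t hi;

	/* Read the high half first... */
	hi = (uint32_t)GMAC_READ_2(sc, port, reg + 4) << 16;
	/*
	 * ...so the low-half read, which clears the high 16 bits of
	 * the counter in MIB clear mode, is guaranteed to come last.
	 */
	return (hi | (uint32_t)GMAC_READ_2(sc, port, reg));
}
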
 4313 static void
 4314 msk_stats_clear(struct msk_if_softc *sc_if)
 4315 {
 4316         struct msk_softc *sc;
 4317         uint32_t reg;
 4318         uint16_t gmac;
 4319         int i;
 4320 
 4321         MSK_IF_LOCK_ASSERT(sc_if);
 4322 
 4323         sc = sc_if->msk_softc;
 4324         /* Set MIB Clear Counter Mode. */
 4325         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 4326         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 4327         /* Read all MIB Counters with Clear Mode set. */
 4328         for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
 4329                 reg = MSK_READ_MIB32(sc_if->msk_port, i);
 4330         /* Clear MIB Clear Counter Mode. */
 4331         gmac &= ~GM_PAR_MIB_CLR;
 4332         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 4333 }
 4334 
 4335 static void
 4336 msk_stats_update(struct msk_if_softc *sc_if)
 4337 {
 4338         struct msk_softc *sc;
 4339         struct ifnet *ifp;
 4340         struct msk_hw_stats *stats;
 4341         uint16_t gmac;
 4342         uint32_t reg;
 4343 
 4344         MSK_IF_LOCK_ASSERT(sc_if);
 4345 
 4346         ifp = sc_if->msk_ifp;
 4347         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 4348                 return;
 4349         sc = sc_if->msk_softc;
 4350         stats = &sc_if->msk_stats;
 4351         /* Set MIB Clear Counter Mode. */
 4352         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 4353         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 4354 
 4355         /* Rx stats. */
 4356         stats->rx_ucast_frames +=
 4357             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
 4358         stats->rx_bcast_frames +=
 4359             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
 4360         stats->rx_pause_frames +=
 4361             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
 4362         stats->rx_mcast_frames +=
 4363             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
 4364         stats->rx_crc_errs +=
 4365             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
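	/*
	 * The GM_RXF_SPARE1/2/3 and GM_TXF_SPARE1 reads in this routine
	 * discard their results; presumably they exist only to step
	 * through (and, in clear mode, reset) every counter in the MIB
	 * block.
	 */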
 4366         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
 4367         stats->rx_good_octets +=
 4368             MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
 4369         stats->rx_bad_octets +=
 4370             MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
 4371         stats->rx_runts +=
 4372             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
 4373         stats->rx_runt_errs +=
 4374             MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
 4375         stats->rx_pkts_64 +=
 4376             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
 4377         stats->rx_pkts_65_127 +=
 4378             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
 4379         stats->rx_pkts_128_255 +=
 4380             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
 4381         stats->rx_pkts_256_511 +=
 4382             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
 4383         stats->rx_pkts_512_1023 +=
 4384             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
 4385         stats->rx_pkts_1024_1518 +=
 4386             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
 4387         stats->rx_pkts_1519_max +=
 4388             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
 4389         stats->rx_pkts_too_long +=
 4390             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
 4391         stats->rx_pkts_jabbers +=
 4392             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
 4393         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
 4394         stats->rx_fifo_oflows +=
 4395             MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
 4396         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
 4397 
 4398         /* Tx stats. */
 4399         stats->tx_ucast_frames +=
 4400             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
 4401         stats->tx_bcast_frames +=
 4402             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
 4403         stats->tx_pause_frames +=
 4404             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
 4405         stats->tx_mcast_frames +=
 4406             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
 4407         stats->tx_octets +=
 4408             MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
 4409         stats->tx_pkts_64 +=
 4410             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
 4411         stats->tx_pkts_65_127 +=
 4412             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
 4413         stats->tx_pkts_128_255 +=
 4414             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
 4415         stats->tx_pkts_256_511 +=
 4416             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
 4417         stats->tx_pkts_512_1023 +=
 4418             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
 4419         stats->tx_pkts_1024_1518 +=
 4420             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
 4421         stats->tx_pkts_1519_max +=
 4422             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
 4423         reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
 4424         stats->tx_colls +=
 4425             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
 4426         stats->tx_late_colls +=
 4427             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
 4428         stats->tx_excess_colls +=
 4429             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
 4430         stats->tx_multi_colls +=
 4431             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
 4432         stats->tx_single_colls +=
 4433             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
 4434         stats->tx_underflows +=
 4435             MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
 4436         /* Clear MIB Clear Counter Mode. */
 4437         gmac &= ~GM_PAR_MIB_CLR;
 4438         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 4439 }
 4440 
 4441 static int
 4442 msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
 4443 {
 4444         struct msk_softc *sc;
 4445         struct msk_if_softc *sc_if;
 4446         uint32_t result, *stat;
 4447         int off;
 4448 
 4449         sc_if = (struct msk_if_softc *)arg1;
 4450         sc = sc_if->msk_softc;
 4451         off = arg2;
 4452         stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
 4453 
 4454         MSK_IF_LOCK(sc_if);
 4455         result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
 4456         result += *stat;
 4457         MSK_IF_UNLOCK(sc_if);
 4458 
 4459         return (sysctl_handle_int(oidp, &result, 0, req));
 4460 }
 4461 
 4462 static int
 4463 msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
 4464 {
 4465         struct msk_softc *sc;
 4466         struct msk_if_softc *sc_if;
 4467         uint64_t result, *stat;
 4468         int off;
 4469 
 4470         sc_if = (struct msk_if_softc *)arg1;
 4471         sc = sc_if->msk_softc;
 4472         off = arg2;
 4473         stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
 4474 
 4475         MSK_IF_LOCK(sc_if);
 4476         result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
 4477         result += *stat;
 4478         MSK_IF_UNLOCK(sc_if);
 4479 
 4480         return (sysctl_handle_quad(oidp, &result, 0, req));
 4481 }
 4482 
 4483 #undef MSK_READ_MIB32
 4484 #undef MSK_READ_MIB64
 4485 
 4486 #define MSK_SYSCTL_STAT32(sc, c, o, p, n, d)                            \
 4487         SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,   \
 4488             sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,    \
 4489             "IU", d)
 4490 #define MSK_SYSCTL_STAT64(sc, c, o, p, n, d)                            \
 4491         SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_QUAD | CTLFLAG_RD,   \
 4492             sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,    \
 4493             "Q", d)
 4494 
 4495 static void
 4496 msk_sysctl_node(struct msk_if_softc *sc_if)
 4497 {
 4498         struct sysctl_ctx_list *ctx;
 4499         struct sysctl_oid_list *child, *schild;
 4500         struct sysctl_oid *tree;
 4501 
 4502         ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
 4503         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
 4504 
 4505         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
 4506             NULL, "MSK Statistics");
 4507         schild = child = SYSCTL_CHILDREN(tree);
 4508         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
 4509             NULL, "MSK RX Statistics");
 4510         child = SYSCTL_CHILDREN(tree);
 4511         MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
 4512             child, rx_ucast_frames, "Good unicast frames");
 4513         MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
 4514             child, rx_bcast_frames, "Good broadcast frames");
 4515         MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
 4516             child, rx_pause_frames, "Pause frames");
 4517         MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
 4518             child, rx_mcast_frames, "Multicast frames");
 4519         MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
 4520             child, rx_crc_errs, "CRC errors");
 4521         MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
 4522             child, rx_good_octets, "Good octets");
 4523         MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
 4524             child, rx_bad_octets, "Bad octets");
 4525         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
 4526             child, rx_pkts_64, "64 bytes frames");
 4527         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
 4528             child, rx_pkts_65_127, "65 to 127 bytes frames");
 4529         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
 4530             child, rx_pkts_128_255, "128 to 255 bytes frames");
 4531         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
 4532             child, rx_pkts_256_511, "256 to 511 bytes frames");
 4533         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
 4534             child, rx_pkts_512_1023, "512 to 1023 bytes frames");
 4535         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
 4536             child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
 4537         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
 4538             child, rx_pkts_1519_max, "1519 to max frames");
 4539         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
 4540             child, rx_pkts_too_long, "frames too long");
 4541         MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
 4542             child, rx_pkts_jabbers, "Jabber errors");
 4543         MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
 4544             child, rx_fifo_oflows, "FIFO overflows");
 4545 
 4546         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
 4547             NULL, "MSK TX Statistics");
 4548         child = SYSCTL_CHILDREN(tree);
 4549         MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
 4550             child, tx_ucast_frames, "Unicast frames");
 4551         MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
 4552             child, tx_bcast_frames, "Broadcast frames");
 4553         MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
 4554             child, tx_pause_frames, "Pause frames");
 4555         MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
 4556             child, tx_mcast_frames, "Multicast frames");
 4557         MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
 4558             child, tx_octets, "Octets");
 4559         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
 4560             child, tx_pkts_64, "64 bytes frames");
 4561         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
 4562             child, tx_pkts_65_127, "65 to 127 bytes frames");
 4563         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
 4564             child, tx_pkts_128_255, "128 to 255 bytes frames");
 4565         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
 4566             child, tx_pkts_256_511, "256 to 511 bytes frames");
 4567         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
 4568             child, tx_pkts_512_1023, "512 to 1023 bytes frames");
 4569         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
 4570             child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
 4571         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
 4572             child, tx_pkts_1519_max, "1519 to max frames");
 4573         MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
 4574             child, tx_colls, "Collisions");
 4575         MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
 4576             child, tx_late_colls, "Late collisions");
 4577         MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
 4578             child, tx_excess_colls, "Excessive collisions");
 4579         MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
 4580             child, tx_multi_colls, "Multiple collisions");
 4581         MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
 4582             child, tx_single_colls, "Single collisions");
 4583         MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
 4584             child, tx_underflows, "FIFO underflows");
 4585 }
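
/*
 * Editor's note: the statistics nodes built above should surface under
 * the per-port device's sysctl tree, e.g. (assuming the first port)
 * dev.msk.0.stats.rx.ucast_frames.  Each read funnels through
 * msk_sysctl_stat32()/msk_sysctl_stat64(), which add the live MIB
 * counter to the totals accumulated by msk_stats_update().
 */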
 4586 
 4587 #undef MSK_SYSCTL_STAT32
 4588 #undef MSK_SYSCTL_STAT64
 4589 
 4590 static int
 4591 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 4592 {
 4593         int error, value;
 4594 
 4595         if (!arg1)
 4596                 return (EINVAL);
 4597         value = *(int *)arg1;
 4598         error = sysctl_handle_int(oidp, &value, 0, req);
 4599         if (error || !req->newptr)
 4600                 return (error);
 4601         if (value < low || value > high)
 4602                 return (EINVAL);
 4603         *(int *)arg1 = value;
 4604 
 4605         return (0);
 4606 }
 4607 
 4608 static int
 4609 sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
 4610 {
 4611 
 4612         return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
 4613             MSK_PROC_MAX));
 4614 }
