FreeBSD/Linux Kernel Cross Reference
sys/dev/msk/if_msk.c

/******************************************************************************
 *
 * Name   : sky2.c
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 *      LICENSE:
 *      Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 *      The computer program files contained in this folder ("Files")
 *      are provided to you under the BSD-type license terms provided
 *      below, and any use of such Files and any derivative works
 *      thereof created by you shall be governed by the following terms
 *      and conditions:
 *
 *      - Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials provided
 *        with the distribution.
 *      - Neither the name of Marvell nor the names of its contributors
 *        may be used to endorse or promote products derived from this
 *        software without specific prior written permission.
 *
 *      THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *      "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *      LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *      FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *      COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *      INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *      BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *      LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *      HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 *      STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *      ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 *      OF THE POSSIBILITY OF SUCH DAMAGE.
 *      /LICENSE
 *
 *****************************************************************************/

/*-
 * SPDX-License-Identifier: BSD-4-Clause AND BSD-3-Clause
 *
 * Copyright (c) 1997, 1998, 1999, 2000
 *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to the lack of documentation, this driver is based on the code of
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/msk/if_mskreg.h>

MODULE_DEPEND(msk, pci, 1, 1, 1);
MODULE_DEPEND(msk, ether, 1, 1, 1);
MODULE_DEPEND(msk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
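/*
 * For reference, the kernel configuration lines in question (as carried
 * by GENERIC):
 *
 *      device miibus           # MII bus support
 *      device msk              # Marvell/SysKonnect Yukon II Gigabit Ethernet
 */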
#include "miibus_if.h"

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
static int legacy_intr = 0;
TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
static int jumbo_disable = 0;
TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);

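/*
 * The tunables above can be set at boot from loader.conf(5); illustrative
 * (non-default) values only:
 *
 *      hw.msk.msi_disable="1"          # use legacy INTx instead of MSI
 *      hw.msk.jumbo_disable="1"        # skip jumbo-frame buffer allocation
 */
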
#define MSK_CSUM_FEATURES       (CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static const struct msk_product {
        uint16_t        msk_vendorid;
        uint16_t        msk_deviceid;
        const char      *msk_name;
} msk_products[] = {
        { VENDORID_SK, DEVICEID_SK_YUKON2,
            "SK-9Sxx Gigabit Ethernet" },
        { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
            "SK-9Exx Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
            "Marvell Yukon 88E8021CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
            "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
            "Marvell Yukon 88E8022CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
            "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
            "Marvell Yukon 88E8061CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
            "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
            "Marvell Yukon 88E8062CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
            "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8035,
            "Marvell Yukon 88E8035 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8036,
            "Marvell Yukon 88E8036 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8038,
            "Marvell Yukon 88E8038 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8039,
            "Marvell Yukon 88E8039 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8040,
            "Marvell Yukon 88E8040 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8040T,
            "Marvell Yukon 88E8040T Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8042,
            "Marvell Yukon 88E8042 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8048,
            "Marvell Yukon 88E8048 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4361,
            "Marvell Yukon 88E8050 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4360,
            "Marvell Yukon 88E8052 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4362,
            "Marvell Yukon 88E8053 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4363,
            "Marvell Yukon 88E8055 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4364,
            "Marvell Yukon 88E8056 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4365,
            "Marvell Yukon 88E8070 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436A,
            "Marvell Yukon 88E8058 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436B,
            "Marvell Yukon 88E8071 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436C,
            "Marvell Yukon 88E8072 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436D,
            "Marvell Yukon 88E8055 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4370,
            "Marvell Yukon 88E8075 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4380,
            "Marvell Yukon 88E8057 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4381,
            "Marvell Yukon 88E8059 Gigabit Ethernet" },
        { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
            "D-Link 550SX Gigabit Ethernet" },
        { VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
            "D-Link 560SX Gigabit Ethernet" },
        { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
            "D-Link 560T Gigabit Ethernet" }
};

static const char *model_name[] = {
        "Yukon XL",
        "Yukon EC Ultra",
        "Yukon EX",
        "Yukon EC",
        "Yukon FE",
        "Yukon FE+",
        "Yukon Supreme",
        "Yukon Ultra 2",
        "Yukon Unknown",
        "Yukon Optima",
};

static int mskc_probe(device_t);
static int mskc_attach(device_t);
static int mskc_detach(device_t);
static int mskc_shutdown(device_t);
static int mskc_setup_rambuffer(struct msk_softc *);
static int mskc_suspend(device_t);
static int mskc_resume(device_t);
static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t);
static void mskc_reset(struct msk_softc *);

static int msk_probe(device_t);
static int msk_attach(device_t);
static int msk_detach(device_t);

static void msk_tick(void *);
static void msk_intr(void *);
static void msk_intr_phy(struct msk_if_softc *);
static void msk_intr_gmac(struct msk_if_softc *);
static __inline void msk_rxput(struct msk_if_softc *);
static int msk_handle_events(struct msk_softc *);
static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void msk_intr_hwerr(struct msk_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void msk_fixup_rx(struct mbuf *);
#endif
static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
static void msk_txeof(struct msk_if_softc *, int);
static int msk_encap(struct msk_if_softc *, struct mbuf **);
static void msk_start(struct ifnet *);
static void msk_start_locked(struct ifnet *);
static int msk_ioctl(struct ifnet *, u_long, caddr_t);
static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void msk_set_rambuffer(struct msk_if_softc *);
static void msk_set_tx_stfwd(struct msk_if_softc *);
static void msk_init(void *);
static void msk_init_locked(struct msk_if_softc *);
static void msk_stop(struct msk_if_softc *);
static void msk_watchdog(struct msk_if_softc *);
static int msk_mediachange(struct ifnet *);
static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
static void msk_phy_power(struct msk_softc *, int);
static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int msk_status_dma_alloc(struct msk_softc *);
static void msk_status_dma_free(struct msk_softc *);
static int msk_txrx_dma_alloc(struct msk_if_softc *);
static int msk_rx_dma_jalloc(struct msk_if_softc *);
static void msk_txrx_dma_free(struct msk_if_softc *);
static void msk_rx_dma_jfree(struct msk_if_softc *);
static int msk_rx_fill(struct msk_if_softc *, int);
static int msk_init_rx_ring(struct msk_if_softc *);
static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
static void msk_init_tx_ring(struct msk_if_softc *);
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int msk_newbuf(struct msk_if_softc *, int);
static int msk_jumbo_newbuf(struct msk_if_softc *, int);

static int msk_phy_readreg(struct msk_if_softc *, int, int);
static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
static int msk_miibus_readreg(device_t, int, int);
static int msk_miibus_writereg(device_t, int, int, int);
static void msk_miibus_statchg(device_t);

static void msk_rxfilter(struct msk_if_softc *);
static void msk_setvlan(struct msk_if_softc *, struct ifnet *);

static void msk_stats_clear(struct msk_if_softc *);
static void msk_stats_update(struct msk_if_softc *);
static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
static void msk_sysctl_node(struct msk_if_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);

static device_method_t mskc_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         mskc_probe),
        DEVMETHOD(device_attach,        mskc_attach),
        DEVMETHOD(device_detach,        mskc_detach),
        DEVMETHOD(device_suspend,       mskc_suspend),
        DEVMETHOD(device_resume,        mskc_resume),
        DEVMETHOD(device_shutdown,      mskc_shutdown),

        DEVMETHOD(bus_get_dma_tag,      mskc_get_dma_tag),

        DEVMETHOD_END
};

static driver_t mskc_driver = {
        "mskc",
        mskc_methods,
        sizeof(struct msk_softc)
};

static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         msk_probe),
        DEVMETHOD(device_attach,        msk_attach),
        DEVMETHOD(device_detach,        msk_detach),
        DEVMETHOD(device_shutdown,      bus_generic_shutdown),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       msk_miibus_readreg),
        DEVMETHOD(miibus_writereg,      msk_miibus_writereg),
        DEVMETHOD(miibus_statchg,       msk_miibus_statchg),

        DEVMETHOD_END
};

static driver_t msk_driver = {
        "msk",
        msk_methods,
        sizeof(struct msk_if_softc)
};

static devclass_t msk_devclass;

DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, NULL, NULL);
DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);

static struct resource_spec msk_res_spec_io[] = {
        { SYS_RES_IOPORT,       PCIR_BAR(1),    RF_ACTIVE },
        { -1,                   0,              0 }
};

static struct resource_spec msk_res_spec_mem[] = {
        { SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
        { -1,                   0,              0 }
};

static struct resource_spec msk_irq_spec_legacy[] = {
        { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
        { -1,                   0,              0 }
};

static struct resource_spec msk_irq_spec_msi[] = {
        { SYS_RES_IRQ,          1,              RF_ACTIVE },
        { -1,                   0,              0 }
};
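
/*
 * Note on the IRQ specs above: the legacy interrupt is requested at IRQ
 * resource ID 0 and may be shared with other devices, while the MSI
 * vector is requested at resource ID 1 (MSI resource IDs start at 1)
 * and is not marked RF_SHAREABLE.
 */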

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
        struct msk_if_softc *sc_if;

        sc_if = device_get_softc(dev);

        return (msk_phy_readreg(sc_if, phy, reg));
}

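/*
 * Read a PHY register through the GMAC's SMI (MDIO) interface: start a
 * read for the given PHY and register address, then poll GM_SMI_CTRL
 * until the controller marks the data in GM_SMI_DATA as valid.
 */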
static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
        struct msk_softc *sc;
        int i, val;

        sc = sc_if->msk_softc;

        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
            GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

        for (i = 0; i < MSK_TIMEOUT; i++) {
                DELAY(1);
                val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
                if ((val & GM_SMI_CT_RD_VAL) != 0) {
                        val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
                        break;
                }
        }

        if (i == MSK_TIMEOUT) {
                if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
                val = 0;
        }

        return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct msk_if_softc *sc_if;

        sc_if = device_get_softc(dev);

        return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
        struct msk_softc *sc;
        int i;

        sc = sc_if->msk_softc;

        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
            GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
        for (i = 0; i < MSK_TIMEOUT; i++) {
                DELAY(1);
                if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
                    GM_SMI_CT_BUSY) == 0)
                        break;
        }
        if (i == MSK_TIMEOUT)
                if_printf(sc_if->msk_ifp, "phy write timeout\n");

        return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
        struct msk_softc *sc;
        struct msk_if_softc *sc_if;
        struct mii_data *mii;
        struct ifnet *ifp;
        uint32_t gmac;

        sc_if = device_get_softc(dev);
        sc = sc_if->msk_softc;

        MSK_IF_LOCK_ASSERT(sc_if);

        mii = device_get_softc(sc_if->msk_miibus);
        ifp = sc_if->msk_ifp;
        if (mii == NULL || ifp == NULL ||
            (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;

        sc_if->msk_flags &= ~MSK_FLAG_LINK;
        if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
            (IFM_AVALID | IFM_ACTIVE)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc_if->msk_flags |= MSK_FLAG_LINK;
                        break;
                case IFM_1000_T:
                case IFM_1000_SX:
                case IFM_1000_LX:
                case IFM_1000_CX:
                        if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
                                sc_if->msk_flags |= MSK_FLAG_LINK;
                        break;
                default:
                        break;
                }
        }

        if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
                /* Enable Tx FIFO underrun and Rx FIFO overrun interrupts. */
                CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
                    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
                /*
                 * Because mii(4) notifies msk(4) whenever it detects a
                 * link state change, there is no need to enable automatic
                 * speed/flow-control/duplex updates.
                 */
                gmac = GM_GPCR_AU_ALL_DIS;
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_1000_SX:
                case IFM_1000_T:
                        gmac |= GM_GPCR_SPEED_1000;
                        break;
                case IFM_100_TX:
                        gmac |= GM_GPCR_SPEED_100;
                        break;
                case IFM_10_T:
                        break;
                }

                if ((IFM_OPTIONS(mii->mii_media_active) &
                    IFM_ETH_RXPAUSE) == 0)
                        gmac |= GM_GPCR_FC_RX_DIS;
                if ((IFM_OPTIONS(mii->mii_media_active) &
                     IFM_ETH_TXPAUSE) == 0)
                        gmac |= GM_GPCR_FC_TX_DIS;
                if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
                        gmac |= GM_GPCR_DUP_FULL;
                else
                        gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
                gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
                GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
                /* Read back to make sure the write has completed. */
                GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
                gmac = GMC_PAUSE_OFF;
                if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
                        if ((IFM_OPTIONS(mii->mii_media_active) &
                            IFM_ETH_RXPAUSE) != 0)
                                gmac = GMC_PAUSE_ON;
                }
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

                /* Enable PHY interrupt for FIFO underrun/overflow. */
                msk_phy_writereg(sc_if, PHY_ADDR_MARV,
                    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
        } else {
                /*
                 * Link state changed to down.
                 * Disable PHY interrupts.
                 */
                msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
                /* Disable Rx/Tx MAC. */
                gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
                if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
                        gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
                        GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
                        /* Read back to make sure the write has completed. */
                        GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
                }
        }
}

static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
        struct msk_softc *sc;
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t mchash[2];
        uint32_t crc;
        uint16_t mode;

        sc = sc_if->msk_softc;

        MSK_IF_LOCK_ASSERT(sc_if);

        ifp = sc_if->msk_ifp;

        bzero(mchash, sizeof(mchash));
        mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
        if ((ifp->if_flags & IFF_PROMISC) != 0)
                mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
        else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
                mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
                mchash[0] = 0xffff;
                mchash[1] = 0xffff;
        } else {
                mode |= GM_RXCR_UCF_ENA;
                if_maddr_rlock(ifp);
                CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
                            ifma->ifma_addr), ETHER_ADDR_LEN);
                        /* Just want the 6 least significant bits. */
                        crc &= 0x3f;
                        /* Set the corresponding bit in the hash table. */
                        mchash[crc >> 5] |= 1 << (crc & 0x1f);
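                        /*
                         * Worked example: crc = 0x2b (binary 101011) gives
                         * crc >> 5 = 1 and crc & 0x1f = 11, so bit 11 of
                         * mchash[1] is set: mchash[1] |= 0x800.
                         */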
                }
                if_maddr_runlock(ifp);
                if (mchash[0] != 0 || mchash[1] != 0)
                        mode |= GM_RXCR_MCF_ENA;
        }

        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
            mchash[0] & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
            (mchash[0] >> 16) & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
            mchash[1] & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
            (mchash[1] >> 16) & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
        struct msk_softc *sc;

        sc = sc_if->msk_softc;
        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
                    RX_VLAN_STRIP_ON);
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
                    TX_VLAN_TAG_ON);
        } else {
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
                    RX_VLAN_STRIP_OFF);
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
                    TX_VLAN_TAG_OFF);
        }
}

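/*
 * When Rx checksum offload is enabled on old-style (non-DESCV2)
 * descriptors, ring initialization queues an OP_TCPSTART list element;
 * wait for the prefetch unit to consume it, then give the consumed slot
 * a real receive buffer so the ring stays fully populated.
 */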
static int
msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
{
        uint16_t idx;
        int i;

        if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
            (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
                /* Wait until the controller executes the OP_TCPSTART command. */
                for (i = 100; i > 0; i--) {
                        DELAY(100);
                        idx = CSR_READ_2(sc_if->msk_softc,
                            Y2_PREF_Q_ADDR(sc_if->msk_rxq,
                            PREF_UNIT_GET_IDX_REG));
                        if (idx != 0)
                                break;
                }
                if (i == 0) {
                        device_printf(sc_if->msk_if_dev,
                            "prefetch unit stuck?\n");
                        return (ETIMEDOUT);
                }
                /*
                 * Fill the consumed LE with a free buffer.  This could be
                 * done in the Rx handler, but we don't want to add
                 * special-case code to that fast path.
                 */
                if (jumbo > 0) {
                        if (msk_jumbo_newbuf(sc_if, 0) != 0)
                                return (ENOBUFS);
                        bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
                            sc_if->msk_cdata.msk_jumbo_rx_ring_map,
                            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                } else {
                        if (msk_newbuf(sc_if, 0) != 0)
                                return (ENOBUFS);
                        bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
                            sc_if->msk_cdata.msk_rx_ring_map,
                            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                }
                sc_if->msk_cdata.msk_rx_prod = 0;
                CSR_WRITE_2(sc_if->msk_softc,
                    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
                    sc_if->msk_cdata.msk_rx_prod);
        }
        return (0);
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
        struct msk_ring_data *rd;
        struct msk_rxdesc *rxd;
        int i, nbuf, prod;

        MSK_IF_LOCK_ASSERT(sc_if);

        sc_if->msk_cdata.msk_rx_cons = 0;
        sc_if->msk_cdata.msk_rx_prod = 0;
        sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

        rd = &sc_if->msk_rdata;
        bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
        for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
                rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_rx_ring[prod];
                MSK_INC(prod, MSK_RX_RING_CNT);
        }
        nbuf = MSK_RX_BUF_CNT;
        prod = 0;
        /* Tell the controller where to start Rx checksum computation. */
        if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
            (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
#ifdef MSK_64BIT_DMA
                rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_rx_ring[prod];
                rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
                    ETHER_HDR_LEN);
                rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
                MSK_INC(prod, MSK_RX_RING_CNT);
                MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
#endif
                rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_rx_ring[prod];
                rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
                    ETHER_HDR_LEN);
                rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
                MSK_INC(prod, MSK_RX_RING_CNT);
                MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
                nbuf--;
        }
        for (i = 0; i < nbuf; i++) {
                if (msk_newbuf(sc_if, prod) != 0)
                        return (ENOBUFS);
                MSK_RX_INC(prod, MSK_RX_RING_CNT);
        }

        bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
            sc_if->msk_cdata.msk_rx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Update prefetch unit. */
        sc_if->msk_cdata.msk_rx_prod = prod;
        CSR_WRITE_2(sc_if->msk_softc,
            Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
            (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) %
            MSK_RX_RING_CNT);
        if (msk_rx_fill(sc_if, 0) != 0)
                return (ENOBUFS);
        return (0);
}

static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
        struct msk_ring_data *rd;
        struct msk_rxdesc *rxd;
        int i, nbuf, prod;

        MSK_IF_LOCK_ASSERT(sc_if);

        sc_if->msk_cdata.msk_rx_cons = 0;
        sc_if->msk_cdata.msk_rx_prod = 0;
        sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

        rd = &sc_if->msk_rdata;
        bzero(rd->msk_jumbo_rx_ring,
            sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
        for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
                rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
                MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
        }
        nbuf = MSK_RX_BUF_CNT;
        prod = 0;
        /* Tell the controller where to start Rx checksum computation. */
        if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
            (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
#ifdef MSK_64BIT_DMA
                rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
                rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
                    ETHER_HDR_LEN);
                rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
                MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
                MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
#endif
                rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
                rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
                    ETHER_HDR_LEN);
                rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
                MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
                MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
                nbuf--;
        }
        for (i = 0; i < nbuf; i++) {
                if (msk_jumbo_newbuf(sc_if, prod) != 0)
                        return (ENOBUFS);
                MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT);
        }

        bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
            sc_if->msk_cdata.msk_jumbo_rx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Update prefetch unit. */
        sc_if->msk_cdata.msk_rx_prod = prod;
        CSR_WRITE_2(sc_if->msk_softc,
            Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
            (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) %
            MSK_JUMBO_RX_RING_CNT);
        if (msk_rx_fill(sc_if, 1) != 0)
                return (ENOBUFS);
        return (0);
}

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
        struct msk_ring_data *rd;
        struct msk_txdesc *txd;
        int i;

        sc_if->msk_cdata.msk_tso_mtu = 0;
        sc_if->msk_cdata.msk_last_csum = 0;
        sc_if->msk_cdata.msk_tx_prod = 0;
        sc_if->msk_cdata.msk_tx_cons = 0;
        sc_if->msk_cdata.msk_tx_cnt = 0;
        sc_if->msk_cdata.msk_tx_high_addr = 0;

        rd = &sc_if->msk_rdata;
        bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
        for (i = 0; i < MSK_TX_RING_CNT; i++) {
                txd = &sc_if->msk_cdata.msk_txdesc[i];
                txd->tx_m = NULL;
                txd->tx_le = &rd->msk_tx_ring[i];
        }

        bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
            sc_if->msk_cdata.msk_tx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

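/*
 * Re-arm an Rx descriptor with the mbuf it already holds.  Used when a
 * received frame is being dropped and its buffer can simply be recycled
 * instead of allocating a replacement.
 */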
static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;

#ifdef MSK_64BIT_DMA
        rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
        rx_le = rxd->rx_le;
        rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
        MSK_INC(idx, MSK_RX_RING_CNT);
#endif
        rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
        m = rxd->rx_m;
        rx_le = rxd->rx_le;
        rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;

#ifdef MSK_64BIT_DMA
        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
        rx_le = rxd->rx_le;
        rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
        MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
#endif
        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
        m = rxd->rx_m;
        rx_le = rxd->rx_le;
        rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

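/*
 * Allocate and DMA-map a fresh receive cluster for ring slot 'idx'.
 * The spare DMA map is loaded first so the slot keeps its current
 * buffer if allocation or mapping fails; on success, the slot's map and
 * the spare map are swapped.
 */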
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        bus_dmamap_t map;
        int nsegs;

        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);

        m->m_len = m->m_pkthdr.len = MCLBYTES;
        if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
                m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
        else
                m_adj(m, MSK_RX_BUF_ALIGN);
#endif

        if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
            sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
            BUS_DMA_NOWAIT) != 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#ifdef MSK_64BIT_DMA
        rx_le = rxd->rx_le;
        rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
        rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
        MSK_INC(idx, MSK_RX_RING_CNT);
        rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
#endif
        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
                rxd->rx_m = NULL;
        }
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
        sc_if->msk_cdata.msk_rx_sparemap = map;
        bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_PREREAD);
        rxd->rx_m = m;
        rx_le = rxd->rx_le;
        rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
        rx_le->msk_control =
            htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

        return (0);
}

static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        bus_dmamap_t map;
        int nsegs;

        m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
        if (m == NULL)
                return (ENOBUFS);
        m->m_len = m->m_pkthdr.len = MJUM9BYTES;
        if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
                m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
        else
                m_adj(m, MSK_RX_BUF_ALIGN);
#endif

        if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
            sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
            BUS_DMA_NOWAIT) != 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
#ifdef MSK_64BIT_DMA
        rx_le = rxd->rx_le;
        rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
        rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
        MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
#endif
        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
                    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
                    rxd->rx_dmamap);
                rxd->rx_m = NULL;
        }
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
        sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
        bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_PREREAD);
        rxd->rx_m = m;
        rx_le = rxd->rx_le;
        rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
        rx_le->msk_control =
            htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

        return (0);
}

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
        struct msk_if_softc *sc_if;
        struct mii_data *mii;
        int error;

        sc_if = ifp->if_softc;

        MSK_IF_LOCK(sc_if);
        mii = device_get_softc(sc_if->msk_miibus);
        error = mii_mediachg(mii);
        MSK_IF_UNLOCK(sc_if);

        return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct msk_if_softc *sc_if;
        struct mii_data *mii;

        sc_if = ifp->if_softc;
        MSK_IF_LOCK(sc_if);
        if ((ifp->if_flags & IFF_UP) == 0) {
                MSK_IF_UNLOCK(sc_if);
                return;
        }
        mii = device_get_softc(sc_if->msk_miibus);

        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
        MSK_IF_UNLOCK(sc_if);
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct msk_if_softc *sc_if;
        struct ifreq *ifr;
        struct mii_data *mii;
        int error, mask, reinit;

        sc_if = ifp->if_softc;
        ifr = (struct ifreq *)data;
        error = 0;

        switch (command) {
        case SIOCSIFMTU:
                MSK_IF_LOCK(sc_if);
                if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
                        error = EINVAL;
                else if (ifp->if_mtu != ifr->ifr_mtu) {
                        if (ifr->ifr_mtu > ETHERMTU) {
                                if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
                                        error = EINVAL;
                                        MSK_IF_UNLOCK(sc_if);
                                        break;
                                }
                                if ((sc_if->msk_flags &
                                    MSK_FLAG_JUMBO_NOCSUM) != 0) {
                                        ifp->if_hwassist &=
                                            ~(MSK_CSUM_FEATURES | CSUM_TSO);
                                        ifp->if_capenable &=
                                            ~(IFCAP_TSO4 | IFCAP_TXCSUM);
                                        VLAN_CAPABILITIES(ifp);
                                }
                        }
                        ifp->if_mtu = ifr->ifr_mtu;
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                                msk_init_locked(sc_if);
                        }
                }
                MSK_IF_UNLOCK(sc_if);
                break;
        case SIOCSIFFLAGS:
                MSK_IF_LOCK(sc_if);
                if ((ifp->if_flags & IFF_UP) != 0) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                            ((ifp->if_flags ^ sc_if->msk_if_flags) &
                            (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                                msk_rxfilter(sc_if);
                        else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
                                msk_init_locked(sc_if);
                } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                        msk_stop(sc_if);
                sc_if->msk_if_flags = ifp->if_flags;
                MSK_IF_UNLOCK(sc_if);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                MSK_IF_LOCK(sc_if);
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                        msk_rxfilter(sc_if);
                MSK_IF_UNLOCK(sc_if);
                break;
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                mii = device_get_softc(sc_if->msk_miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
                break;
        case SIOCSIFCAP:
                reinit = 0;
                MSK_IF_LOCK(sc_if);
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if ((mask & IFCAP_TXCSUM) != 0 &&
                    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
                                ifp->if_hwassist |= MSK_CSUM_FEATURES;
                        else
                                ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
                }
                if ((mask & IFCAP_RXCSUM) != 0 &&
                    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                        if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
                                reinit = 1;
                }
                if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
                    (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
                        ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
                if ((mask & IFCAP_TSO4) != 0 &&
                    (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
                        ifp->if_capenable ^= IFCAP_TSO4;
                        if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
                                ifp->if_hwassist |= CSUM_TSO;
                        else
                                ifp->if_hwassist &= ~CSUM_TSO;
                }
                if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
                    (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
                    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
                                ifp->if_capenable &=
                                    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
                        msk_setvlan(sc_if, ifp);
                }
                if (ifp->if_mtu > ETHERMTU &&
                    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
                        ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
                        ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
                }
                VLAN_CAPABILITIES(ifp);
                if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                        msk_init_locked(sc_if);
                }
                MSK_IF_UNLOCK(sc_if);
                break;
        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}
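
/*
 * The capability toggles above correspond to ifconfig(8) options; for
 * example, "ifconfig msk0 -txcsum -tso" arrives here as SIOCSIFCAP and
 * clears IFCAP_TXCSUM and IFCAP_TSO4.
 */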

static int
mskc_probe(device_t dev)
{
        const struct msk_product *mp;
        uint16_t vendor, devid;
        int i;

        vendor = pci_get_vendor(dev);
        devid = pci_get_device(dev);
        mp = msk_products;
        for (i = 0; i < nitems(msk_products); i++, mp++) {
                if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
                        device_set_desc(dev, mp->msk_name);
                        return (BUS_PROBE_DEFAULT);
                }
        }

        return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
        int next;
        int i;

        /* Get adapter SRAM size. */
        sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
        if (bootverbose)
                device_printf(sc->msk_dev,
                    "RAM buffer size : %dKB\n", sc->msk_ramsize);
        if (sc->msk_ramsize == 0)
                return (0);

        sc->msk_pflags |= MSK_FLAG_RAMBUF;
        /*
         * Give the receiver 2/3 of the memory, rounded down to a
         * multiple of 1024; Yukon II Tx/Rx RAM buffer sizes must be
         * multiples of 1024.
         */
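        /*
         * Worked example, assuming a 48KB SRAM: msk_ramsize = 48, so
         * msk_rxqsize = rounddown(48 * 1024 * 2 / 3, 1024) = 32768 bytes
         * and msk_txqsize = 49152 - 32768 = 16384 bytes.
         */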
        sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
        sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
        for (i = 0, next = 0; i < sc->msk_num_port; i++) {
                sc->msk_rxqstart[i] = next;
                sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
                next = sc->msk_rxqend[i] + 1;
                sc->msk_txqstart[i] = next;
                sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
                next = sc->msk_txqend[i] + 1;
                if (bootverbose) {
                        device_printf(sc->msk_dev,
                            "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
                            sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
                            sc->msk_rxqend[i]);
                        device_printf(sc->msk_dev,
                            "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
                            sc->msk_txqsize / 1024, sc->msk_txqstart[i],
                            sc->msk_txqend[i]);
                }
        }

        return (0);
}

static void
msk_phy_power(struct msk_softc *sc, int mode)
{
        uint32_t our, val;
        int i;

        switch (mode) {
        case MSK_PHY_POWERUP:
                /* Switch power to VCC (workaround for the VAUX problem). */
                CSR_WRITE_1(sc, B0_POWER_CTRL,
                    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
                /* Disable Core Clock Division, set Clock Select to 0. */
                CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

                val = 0;
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
                    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                        /* Enable bits are inverted. */
                        val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
                              Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
                              Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
                }
                /*
                 * Enable PCI & Core Clock, enable clock gating for both Links.
                 */
                CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

                our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
                our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
                        if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                                /* Deassert Low Power for 1st PHY. */
                                our |= PCI_Y2_PHY1_COMA;
                                if (sc->msk_num_port > 1)
                                        our |= PCI_Y2_PHY2_COMA;
                        }
                }
                if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
                    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
                    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
                        val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
                        val &= (PCI_FORCE_ASPM_REQUEST |
                            PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
                            PCI_ASPM_CLKRUN_REQUEST);
                        /* Set all bits to 0 except bits 15..12. */
                        CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
                        val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
                        val &= PCI_CTL_TIM_VMAIN_AV_MSK;
                        CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
                        CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
                        CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
                        /*
                         * Disable status race, workaround for
                         * Yukon EC Ultra & Yukon EX.
                         */
                        val = CSR_READ_4(sc, B2_GP_IO);
                        val |= GLB_GPIO_STAT_RACE_DIS;
                        CSR_WRITE_4(sc, B2_GP_IO, val);
                        CSR_READ_4(sc, B2_GP_IO);
                }
                /* Release PHY from PowerDown/COMA mode. */
                CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);

                for (i = 0; i < sc->msk_num_port; i++) {
                        CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
                            GMLC_RST_SET);
                        CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
                            GMLC_RST_CLR);
                }
                break;
        case MSK_PHY_POWERDOWN:
                val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
                val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
                    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                        val &= ~PCI_Y2_PHY1_COMA;
                        if (sc->msk_num_port > 1)
                                val &= ~PCI_Y2_PHY2_COMA;
                }
                CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);

                val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
                      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
                      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1331                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1332                         /* Enable bits are inverted. */
 1333                         val = 0;
 1334                 }
 1335                 /*
 1336                  * Disable PCI & Core Clock, disable clock gating for
 1337                  * both Links.
 1338                  */
 1339                 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
 1340                 CSR_WRITE_1(sc, B0_POWER_CTRL,
 1341                     PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
 1342                 break;
 1343         default:
 1344                 break;
 1345         }
 1346 }
 1347 
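      /*
       * Bring the controller into a known state: disable ASF, clear
       * PCI/PEX error bits, power up the PHYs, reset the GMACs, stop
       * timers and descriptor polling, and (re)initialize the status
       * list unit.
       */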
 1348 static void
 1349 mskc_reset(struct msk_softc *sc)
 1350 {
 1351         bus_addr_t addr;
 1352         uint16_t status;
 1353         uint32_t val;
 1354         int i, initram;
 1355 
 1356         /* Disable ASF. */
 1357         if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
 1358             sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
 1359                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1360                     sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 1361                         CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
 1362                         status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
 1363                         /* Clear AHB bridge & microcontroller reset. */
 1364                         status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
 1365                             Y2_ASF_HCU_CCSR_CPU_RST_MODE);
 1366                         /* Clear ASF microcontroller state. */
 1367                         status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
 1368                         status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
 1369                         CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
 1370                         CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
 1371                 } else
 1372                         CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
 1373                 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
 1374                 /*
 1375                  * Since we disabled ASF, S/W reset is required for
 1376                  * Power Management.
 1377                  */
 1378                 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 1379                 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1380         }
 1381 
 1382         /* Clear all error bits in the PCI status register. */
 1383         status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 1384         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1385 
 1386         pci_write_config(sc->msk_dev, PCIR_STATUS, status |
 1387             PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 1388             PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
 1389         CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
 1390 
 1391         switch (sc->msk_bustype) {
 1392         case MSK_PEX_BUS:
 1393                 /* Clear all PEX errors. */
 1394                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 1395                 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 1396                 if ((val & PEX_RX_OV) != 0) {
 1397                         sc->msk_intrmask &= ~Y2_IS_HW_ERR;
 1398                         sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 1399                 }
 1400                 break;
 1401         case MSK_PCI_BUS:
 1402         case MSK_PCIX_BUS:
 1403                 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
 1404                 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
 1405                 if (val == 0)
 1406                         pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
 1407                 if (sc->msk_bustype == MSK_PCIX_BUS) {
 1408                         /* Set Cache Line Size optimization. */
 1409                         val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
 1410                         val |= PCI_CLS_OPT;
 1411                         pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
 1412                 }
 1413                 break;
 1414         }
 1415         /* Set PHY power state. */
 1416         msk_phy_power(sc, MSK_PHY_POWERUP);
 1417 
 1418         /* Reset GPHY/GMAC Control */
 1419         for (i = 0; i < sc->msk_num_port; i++) {
 1420                 /* GPHY Control reset. */
 1421                 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
 1422                 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
 1423                 /* GMAC Control reset. */
 1424                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
 1425                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
 1426                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
 1427                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1428                     sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
 1429                         CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
 1430                             GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 1431                             GMC_BYP_RETR_ON);
 1432         }
 1433 
 1434         if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
 1435             sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
 1436                 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
 1437         if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
 1438                 /* Disable PCIe PHY powerdown (reg 0x80, bit 7). */
 1439                 CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
 1440         }
 1441         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1442 
 1443         /* LED On. */
 1444         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
 1445 
 1446         /* Clear TWSI IRQ. */
 1447         CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
 1448 
 1449         /* Turn off hardware timer. */
 1450         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
 1451         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
 1452 
 1453         /* Turn off descriptor polling. */
 1454         CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
 1455 
 1456         /* Turn off time stamps. */
 1457         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
 1458         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 1459 
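              /*
               * Yukon XL, EC and FE use internal RAM buffers, so their
               * RAM interface timeout registers must be programmed
               * after reset.
               */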
 1460         initram = 0;
 1461         if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
 1462             sc->msk_hw_id == CHIP_ID_YUKON_EC ||
 1463             sc->msk_hw_id == CHIP_ID_YUKON_FE)
 1464                 initram++;
 1465 
 1466         /* Configure timeout values. */
 1467         for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
 1468                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
 1469                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
 1470                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
 1471                     MSK_RI_TO_53);
 1472                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
 1473                     MSK_RI_TO_53);
 1474                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
 1475                     MSK_RI_TO_53);
 1476                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
 1477                     MSK_RI_TO_53);
 1478                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
 1479                     MSK_RI_TO_53);
 1480                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
 1481                     MSK_RI_TO_53);
 1482                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
 1483                     MSK_RI_TO_53);
 1484                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
 1485                     MSK_RI_TO_53);
 1486                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
 1487                     MSK_RI_TO_53);
 1488                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
 1489                     MSK_RI_TO_53);
 1490                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
 1491                     MSK_RI_TO_53);
 1492                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
 1493                     MSK_RI_TO_53);
 1494         }
 1495 
 1496         /* Disable all interrupts. */
 1497         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 1498         CSR_READ_4(sc, B0_HWE_IMSK);
 1499         CSR_WRITE_4(sc, B0_IMSK, 0);
 1500         CSR_READ_4(sc, B0_IMSK);
 1501 
 1502         /*
 1503          * On dual port PCI-X cards, there is a problem where status
 1504          * updates can be received out of order due to split transactions.
 1505          */
 1506         if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
 1507                 uint16_t pcix_cmd;
 1508 
 1509                 pcix_cmd = pci_read_config(sc->msk_dev,
 1510                     sc->msk_pcixcap + PCIXR_COMMAND, 2);
 1511                 /* Clear Max Outstanding Split Transactions. */
 1512                 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
 1513                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1514                 pci_write_config(sc->msk_dev,
 1515                     sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
 1516                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1517         }
 1518         if (sc->msk_expcap != 0) {
 1519                 /* Change Max. Read Request Size to 2048 bytes. */
 1520                 if (pci_get_max_read_req(sc->msk_dev) == 512)
 1521                         pci_set_max_read_req(sc->msk_dev, 2048);
 1522         }
 1523 
 1524         /* Clear status list. */
 1525         bzero(sc->msk_stat_ring,
 1526             sizeof(struct msk_stat_desc) * sc->msk_stat_count);
 1527         sc->msk_stat_cons = 0;
 1528         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 1529             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1530         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
 1531         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
 1532         /* Set the status list base address. */
 1533         addr = sc->msk_stat_ring_paddr;
 1534         CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
 1535         CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
 1536         /* Set the status list last index. */
 1537         CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1);
 1538         if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
 1539             sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
 1540                 /* Workaround for dev. #4.3 */
 1541                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
 1542                 /* Workaround for dev. #4.18 */
 1543                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
 1544                 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
 1545         } else {
 1546                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
 1547                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
 1548                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1549                     sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
 1550                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
 1551                 else
 1552                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
 1553                 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
 1554         }
 1555         /*
 1556          * Use default values for STAT_ISR_TIMER_INI and STAT_LEV_TIMER_INI.
 1557          */
 1558         CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
 1559 
 1560         /* Enable status unit. */
 1561         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
 1562 
 1563         CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
 1564         CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
 1565         CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
 1566 }
 1567 
 1568 static int
 1569 msk_probe(device_t dev)
 1570 {
 1571         struct msk_softc *sc;
 1572         char desc[100];
 1573 
 1574         sc = device_get_softc(device_get_parent(dev));
 1575         /*
 1576          * Not much to do here. We always know there will be
 1577          * at least one GMAC present, and if there are two,
 1578          * mskc_attach() will create a second device instance
 1579          * for us.
 1580          */
 1581         snprintf(desc, sizeof(desc),
 1582             "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
 1583             model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
 1584             sc->msk_hw_rev);
 1585         device_set_desc_copy(dev, desc);
 1586 
 1587         return (BUS_PROBE_DEFAULT);
 1588 }
 1589 
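      /*
       * Attach one network interface (one GMAC port): set up the queue
       * register offsets, allocate DMA resources, read the station
       * address and hook the interface into the network stack.
       */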
 1590 static int
 1591 msk_attach(device_t dev)
 1592 {
 1593         struct msk_softc *sc;
 1594         struct msk_if_softc *sc_if;
 1595         struct ifnet *ifp;
 1596         struct msk_mii_data *mmd;
 1597         int i, port, error;
 1598         uint8_t eaddr[6];
 1599 
 1600         if (dev == NULL)
 1601                 return (EINVAL);
 1602 
 1603         error = 0;
 1604         sc_if = device_get_softc(dev);
 1605         sc = device_get_softc(device_get_parent(dev));
 1606         mmd = device_get_ivars(dev);
 1607         port = mmd->port;
 1608 
 1609         sc_if->msk_if_dev = dev;
 1610         sc_if->msk_port = port;
 1611         sc_if->msk_softc = sc;
 1612         sc_if->msk_flags = sc->msk_pflags;
 1613         sc->msk_if[port] = sc_if;
 1614         /* Setup Tx/Rx queue register offsets. */
 1615         if (port == MSK_PORT_A) {
 1616                 sc_if->msk_txq = Q_XA1;
 1617                 sc_if->msk_txsq = Q_XS1;
 1618                 sc_if->msk_rxq = Q_R1;
 1619         } else {
 1620                 sc_if->msk_txq = Q_XA2;
 1621                 sc_if->msk_txsq = Q_XS2;
 1622                 sc_if->msk_rxq = Q_R2;
 1623         }
 1624 
 1625         callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
 1626         msk_sysctl_node(sc_if);
 1627 
 1628         if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
 1629                 goto fail;
 1630         msk_rx_dma_jalloc(sc_if);
 1631 
 1632         ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
 1633         if (ifp == NULL) {
 1634                 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
 1635                 error = ENOSPC;
 1636                 goto fail;
 1637         }
 1638         ifp->if_softc = sc_if;
 1639         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1640         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1641         ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
 1642         /*
 1643          * Enable Rx checksum offloading unless the controller is a Yukon
 1644          * XL with the legacy format or has broken Rx checksum support.
 1645          */
 1646         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 1647             sc->msk_hw_id != CHIP_ID_YUKON_XL)
 1648                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1649         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
 1650             (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
 1651                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1652         ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
 1653         ifp->if_capenable = ifp->if_capabilities;
 1654         ifp->if_ioctl = msk_ioctl;
 1655         ifp->if_start = msk_start;
 1656         ifp->if_init = msk_init;
 1657         IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
 1658         ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
 1659         IFQ_SET_READY(&ifp->if_snd);
 1660         /*
 1661          * Get station address for this interface. Note that
 1662          * dual port cards actually come with three station
 1663          * addresses: one for each port, plus an extra. The
 1664          * extra one is used by the SysKonnect driver software
 1665          * as a 'virtual' station address for when both ports
 1666          * are operating in failover mode. Currently we don't
 1667          * use this extra address.
 1668          */
 1669         MSK_IF_LOCK(sc_if);
 1670         for (i = 0; i < ETHER_ADDR_LEN; i++)
 1671                 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
 1672 
 1673         /*
 1674          * Call MI attach routine.  Can't hold locks when calling into ether_*.
 1675          */
 1676         MSK_IF_UNLOCK(sc_if);
 1677         ether_ifattach(ifp, eaddr);
 1678         MSK_IF_LOCK(sc_if);
 1679 
 1680         /* VLAN capability setup */
 1681         ifp->if_capabilities |= IFCAP_VLAN_MTU;
 1682         if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
 1683                 /*
 1684                  * Due to Tx checksum offload hardware bugs, msk(4) manually
 1685                  * computes the checksum for short frames. For VLAN tagged
 1686                  * frames this workaround does not work, so checksum
 1687                  * offloading is disabled for VLAN interfaces.
 1688                  */
 1689                 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
 1690                 /*
 1691                  * Enable Rx checksum offloading for VLAN tagged frames
 1692                  * if the controller supports the new descriptor format.
 1693                  */
 1694                 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
 1695                     (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
 1696                         ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
 1697         }
 1698         ifp->if_capenable = ifp->if_capabilities;
 1699         /*
 1700          * Disable Rx checksum offloading by default on controllers that
 1701          * lack the new descriptor format, but keep the capability available.
 1702          */
 1703         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
 1704                 ifp->if_capenable &= ~IFCAP_RXCSUM;
 1705 
 1706         /*
 1707          * Tell the upper layer(s) we support long frames.
 1708          * Must appear after the call to ether_ifattach() because
 1709          * ether_ifattach() sets ifi_hdrlen to the default value.
 1710          */
 1711         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
 1712 
 1713         /*
 1714          * Do miibus setup.
 1715          */
 1716         MSK_IF_UNLOCK(sc_if);
 1717         error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
 1718             msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
 1719             mmd->mii_flags);
 1720         if (error != 0) {
 1721                 device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
 1722                 ether_ifdetach(ifp);
 1723                 error = ENXIO;
 1724                 goto fail;
 1725         }
 1726 
 1727 fail:
 1728         if (error != 0) {
 1729                 /* Access should be ok even though lock has been dropped */
 1730                 sc->msk_if[port] = NULL;
 1731                 msk_detach(dev);
 1732         }
 1733 
 1734         return (error);
 1735 }
 1736 
 1737 /*
 1738  * Attach the interface. Allocate softc structures, do ifmedia
 1739  * setup and ethernet/BPF attach.
 1740  */
 1741 static int
 1742 mskc_attach(device_t dev)
 1743 {
 1744         struct msk_softc *sc;
 1745         struct msk_mii_data *mmd;
 1746         int error, msic, msir, reg;
 1747 
 1748         sc = device_get_softc(dev);
 1749         sc->msk_dev = dev;
 1750         mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1751             MTX_DEF);
 1752 
 1753         /*
 1754          * Map control/status registers.
 1755          */
 1756         pci_enable_busmaster(dev);
 1757 
 1758         /* Allocate I/O resource */
 1759 #ifdef MSK_USEIOSPACE
 1760         sc->msk_res_spec = msk_res_spec_io;
 1761 #else
 1762         sc->msk_res_spec = msk_res_spec_mem;
 1763 #endif
 1764         sc->msk_irq_spec = msk_irq_spec_legacy;
 1765         error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
 1766         if (error) {
 1767                 if (sc->msk_res_spec == msk_res_spec_mem)
 1768                         sc->msk_res_spec = msk_res_spec_io;
 1769                 else
 1770                         sc->msk_res_spec = msk_res_spec_mem;
 1771                 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
 1772                 if (error) {
 1773                         device_printf(dev, "couldn't allocate %s resources\n",
 1774                             sc->msk_res_spec == msk_res_spec_mem ? "memory" :
 1775                             "I/O");
 1776                         mtx_destroy(&sc->msk_mtx);
 1777                         return (ENXIO);
 1778                 }
 1779         }
 1780 
 1781         /* Enable all clocks before accessing any registers. */
 1782         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
 1783 
 1784         CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1785         sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
 1786         sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
 1787         /* Bail out if chip is not recognized. */
 1788         if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
 1789             sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
 1790             sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
 1791                 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
 1792                     sc->msk_hw_id, sc->msk_hw_rev);
 1793                 mtx_destroy(&sc->msk_mtx);
 1794                 return (ENXIO);
 1795         }
 1796 
 1797         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 1798             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1799             OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 1800             &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
 1801             "max number of Rx events to process");
 1802 
 1803         sc->msk_process_limit = MSK_PROC_DEFAULT;
 1804         error = resource_int_value(device_get_name(dev), device_get_unit(dev),
 1805             "process_limit", &sc->msk_process_limit);
 1806         if (error == 0) {
 1807                 if (sc->msk_process_limit < MSK_PROC_MIN ||
 1808                     sc->msk_process_limit > MSK_PROC_MAX) {
 1809                         device_printf(dev, "process_limit value out of range; "
 1810                             "using default: %d\n", MSK_PROC_DEFAULT);
 1811                         sc->msk_process_limit = MSK_PROC_DEFAULT;
 1812                 }
 1813         }
 1814 
 1815         sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
 1816         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
 1817             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 1818             "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
 1819             "Maximum time to delay interrupts");
 1820         resource_int_value(device_get_name(dev), device_get_unit(dev),
 1821             "int_holdoff", &sc->msk_int_holdoff);
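              /*
               * Both knobs can also be preset via device hints, e.g. in
               * /boot/device.hints (illustrative values; the default
               * "mskc" device name and unit 0 are assumed):
               *
               *      hint.mskc.0.process_limit="64"
               *      hint.mskc.0.int_holdoff="100"
               */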
 1822 
 1823         sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
 1824         /* Check number of MACs. */
 1825         sc->msk_num_port = 1;
 1826         if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
 1827             CFG_DUAL_MAC_MSK) {
 1828                 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
 1829                         sc->msk_num_port++;
 1830         }
 1831 
 1832         /* Check bus type. */
 1833         if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
 1834                 sc->msk_bustype = MSK_PEX_BUS;
 1835                 sc->msk_expcap = reg;
 1836         } else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
 1837                 sc->msk_bustype = MSK_PCIX_BUS;
 1838                 sc->msk_pcixcap = reg;
 1839         } else
 1840                 sc->msk_bustype = MSK_PCI_BUS;
 1841 
 1842         switch (sc->msk_hw_id) {
 1843         case CHIP_ID_YUKON_EC:
 1844                 sc->msk_clock = 125;    /* 125 MHz */
 1845                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1846                 break;
 1847         case CHIP_ID_YUKON_EC_U:
 1848                 sc->msk_clock = 125;    /* 125 MHz */
 1849                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
 1850                 break;
 1851         case CHIP_ID_YUKON_EX:
 1852                 sc->msk_clock = 125;    /* 125 MHz */
 1853                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
 1854                     MSK_FLAG_AUTOTX_CSUM;
 1855                 /*
 1856                  * Yukon Extreme seems to have a silicon bug in its
 1857                  * automatic Tx checksum calculation capability.
 1858                  */
 1859                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
 1860                         sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
 1861                 /*
 1862                  * Yukon Extreme A0 cannot use store-and-forward
 1863                  * for jumbo frames, so disable Tx checksum
 1864                  * offloading for them.
 1865                  */
 1866                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
 1867                         sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
 1868                 break;
 1869         case CHIP_ID_YUKON_FE:
 1870                 sc->msk_clock = 100;    /* 100 MHz */
 1871                 sc->msk_pflags |= MSK_FLAG_FASTETHER;
 1872                 break;
 1873         case CHIP_ID_YUKON_FE_P:
 1874                 sc->msk_clock = 50;     /* 50 MHz */
 1875                 sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
 1876                     MSK_FLAG_AUTOTX_CSUM;
 1877                 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
 1878                         /*
 1879                          * XXX
 1880                          * FE+ A0 has a status LE writeback bug, so msk(4)
 1881                          * does not rely on the status word of received
 1882                          * frames in msk_rxeof(). This in turn disables all
 1883                          * hardware assistance bits reported by the status
 1884                          * word, as well as the validity of the received
 1885                          * frame. Just pass received frames to the upper
 1886                          * stack with minimal testing and let it handle them.
 1887                          */
 1888                         sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
 1889                             MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
 1890                 }
 1891                 break;
 1892         case CHIP_ID_YUKON_XL:
 1893                 sc->msk_clock = 156;    /* 156 MHz */
 1894                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1895                 break;
 1896         case CHIP_ID_YUKON_SUPR:
 1897                 sc->msk_clock = 125;    /* 125 MHz */
 1898                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
 1899                     MSK_FLAG_AUTOTX_CSUM;
 1900                 break;
 1901         case CHIP_ID_YUKON_UL_2:
 1902                 sc->msk_clock = 125;    /* 125 MHz */
 1903                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1904                 break;
 1905         case CHIP_ID_YUKON_OPT:
 1906                 sc->msk_clock = 125;    /* 125 MHz */
 1907                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
 1908                 break;
 1909         default:
 1910                 sc->msk_clock = 156;    /* 156 MHz */
 1911                 break;
 1912         }
 1913 
 1914         /* Allocate IRQ resources. */
 1915         msic = pci_msi_count(dev);
 1916         if (bootverbose)
 1917                 device_printf(dev, "MSI count : %d\n", msic);
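              /*
               * The driver uses at most one MSI vector; if a single
               * vector cannot be allocated, fall back to the legacy
               * INTx interrupt.
               */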
 1918         if (legacy_intr != 0)
 1919                 msi_disable = 1;
 1920         if (msi_disable == 0 && msic > 0) {
 1921                 msir = 1;
 1922                 if (pci_alloc_msi(dev, &msir) == 0) {
 1923                         if (msir == 1) {
 1924                                 sc->msk_pflags |= MSK_FLAG_MSI;
 1925                                 sc->msk_irq_spec = msk_irq_spec_msi;
 1926                         } else
 1927                                 pci_release_msi(dev);
 1928                 }
 1929         }
 1930 
 1931         error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
 1932         if (error) {
 1933                 device_printf(dev, "couldn't allocate IRQ resources\n");
 1934                 goto fail;
 1935         }
 1936 
 1937         if ((error = msk_status_dma_alloc(sc)) != 0)
 1938                 goto fail;
 1939 
 1940         /* Set base interrupt mask. */
 1941         sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
 1942         sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
 1943             Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
 1944 
 1945         /* Reset the adapter. */
 1946         mskc_reset(sc);
 1947 
 1948         if ((error = mskc_setup_rambuffer(sc)) != 0)
 1949                 goto fail;
 1950 
 1951         sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
 1952         if (sc->msk_devs[MSK_PORT_A] == NULL) {
 1953                 device_printf(dev, "failed to add child for PORT_A\n");
 1954                 error = ENXIO;
 1955                 goto fail;
 1956         }
 1957         mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
 1958         mmd->port = MSK_PORT_A;
 1959         mmd->pmd = sc->msk_pmd;
 1960         mmd->mii_flags |= MIIF_DOPAUSE;
 1961         if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
 1962                 mmd->mii_flags |= MIIF_HAVEFIBER;
 1963         if (sc->msk_pmd == 'P')
 1964                 mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
 1965         device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);
 1966 
 1967         if (sc->msk_num_port > 1) {
 1968                 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
 1969                 if (sc->msk_devs[MSK_PORT_B] == NULL) {
 1970                         device_printf(dev, "failed to add child for PORT_B\n");
 1971                         error = ENXIO;
 1972                         goto fail;
 1973                 }
 1974                 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
 1975                     M_ZERO);
 1976                 mmd->port = MSK_PORT_B;
 1977                 mmd->pmd = sc->msk_pmd;
 1978                 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
 1979                         mmd->mii_flags |= MIIF_HAVEFIBER;
 1980                 if (sc->msk_pmd == 'P')
 1981                         mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
 1982                 device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
 1983         }
 1984 
 1985         error = bus_generic_attach(dev);
 1986         if (error) {
 1987                 device_printf(dev, "failed to attach port(s)\n");
 1988                 goto fail;
 1989         }
 1990 
 1991         /* Hook interrupt last to avoid having to lock softc. */
 1992         error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
 1993             INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
 1994         if (error != 0) {
 1995                 device_printf(dev, "couldn't set up interrupt handler\n");
 1996                 goto fail;
 1997         }
 1998 fail:
 1999         if (error != 0)
 2000                 mskc_detach(dev);
 2001 
 2002         return (error);
 2003 }
 2004 
 2005 /*
 2006  * Shutdown hardware and free up resources. This can be called any
 2007  * time after the mutex has been initialized. It is called in both
 2008  * the error case in attach and the normal detach case so it needs
 2009  * to be careful about only freeing resources that have actually been
 2010  * allocated.
 2011  */
 2012 static int
 2013 msk_detach(device_t dev)
 2014 {
 2015         struct msk_softc *sc;
 2016         struct msk_if_softc *sc_if;
 2017         struct ifnet *ifp;
 2018 
 2019         sc_if = device_get_softc(dev);
 2020         KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
 2021             ("msk mutex not initialized in msk_detach"));
 2022         MSK_IF_LOCK(sc_if);
 2023 
 2024         ifp = sc_if->msk_ifp;
 2025         if (device_is_attached(dev)) {
 2026                 /* XXX */
 2027                 sc_if->msk_flags |= MSK_FLAG_DETACH;
 2028                 msk_stop(sc_if);
 2029                 /* Can't hold locks while calling detach. */
 2030                 MSK_IF_UNLOCK(sc_if);
 2031                 callout_drain(&sc_if->msk_tick_ch);
 2032                 if (ifp)
 2033                         ether_ifdetach(ifp);
 2034                 MSK_IF_LOCK(sc_if);
 2035         }
 2036 
 2037         /*
 2038          * We're generally called from mskc_detach() which is using
 2039          * device_delete_child() to get to here. It's already trashed
 2040          * miibus for us, so don't do it here or we'll panic.
 2041          *
 2042          * if (sc_if->msk_miibus != NULL) {
 2043          *      device_delete_child(dev, sc_if->msk_miibus);
 2044          *      sc_if->msk_miibus = NULL;
 2045          * }
 2046          */
 2047 
 2048         msk_rx_dma_jfree(sc_if);
 2049         msk_txrx_dma_free(sc_if);
 2050         bus_generic_detach(dev);
 2051 
 2052         sc = sc_if->msk_softc;
 2053         sc->msk_if[sc_if->msk_port] = NULL;
 2054         MSK_IF_UNLOCK(sc_if);
 2055         if (ifp)
 2056                 if_free(ifp);
 2057 
 2058         return (0);
 2059 }
 2060 
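      /*
       * Detach the controller: delete the per-port children, mask all
       * interrupts, assert hardware reset and release bus resources.
       */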
 2061 static int
 2062 mskc_detach(device_t dev)
 2063 {
 2064         struct msk_softc *sc;
 2065 
 2066         sc = device_get_softc(dev);
 2067         KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
 2068 
 2069         if (device_is_alive(dev)) {
 2070                 if (sc->msk_devs[MSK_PORT_A] != NULL) {
 2071                         free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
 2072                             M_DEVBUF);
 2073                         device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
 2074                 }
 2075                 if (sc->msk_devs[MSK_PORT_B] != NULL) {
 2076                         free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
 2077                             M_DEVBUF);
 2078                         device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
 2079                 }
 2080                 bus_generic_detach(dev);
 2081         }
 2082 
 2083         /* Disable all interrupts. */
 2084         CSR_WRITE_4(sc, B0_IMSK, 0);
 2085         CSR_READ_4(sc, B0_IMSK);
 2086         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 2087         CSR_READ_4(sc, B0_HWE_IMSK);
 2088 
 2089         /* LED Off. */
 2090         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
 2091 
 2092         /* Put the hardware into reset. */
 2093         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 2094 
 2095         msk_status_dma_free(sc);
 2096 
 2097         if (sc->msk_intrhand) {
 2098                 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
 2099                 sc->msk_intrhand = NULL;
 2100         }
 2101         bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
 2102         if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
 2103                 pci_release_msi(dev);
 2104         bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
 2105         mtx_destroy(&sc->msk_mtx);
 2106 
 2107         return (0);
 2108 }
 2109 
 2110 static bus_dma_tag_t
 2111 mskc_get_dma_tag(device_t bus, device_t child __unused)
 2112 {
 2113 
 2114         return (bus_get_dma_tag(bus));
 2115 }
 2116 
 2117 struct msk_dmamap_arg {
 2118         bus_addr_t      msk_busaddr;
 2119 };
 2120 
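      /*
       * Callback for bus_dmamap_load(); records the single segment's
       * bus address so callers can retrieve the physical address of a
       * loaded ring.
       */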
 2121 static void
 2122 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 2123 {
 2124         struct msk_dmamap_arg *ctx;
 2125 
 2126         if (error != 0)
 2127                 return;
 2128         ctx = arg;
 2129         ctx->msk_busaddr = segs[0].ds_addr;
 2130 }
 2131 
 2132 /* Create status DMA region. */
 2133 static int
 2134 msk_status_dma_alloc(struct msk_softc *sc)
 2135 {
 2136         struct msk_dmamap_arg ctx;
 2137         bus_size_t stat_sz;
 2138         int count, error;
 2139 
 2140         /*
 2141          * It seems the controller requires the number of status LE
 2142          * entries to be a power of 2, and the maximum number of
 2143          * status LE entries is 4096.  For dual-port controllers, the
 2144          * number of status LE entries should be large enough to hold
 2145          * both ports' status updates.
 2146          */
 2147         count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT;
 2148         count = imin(4096, roundup2(count, 1024));
 2149         sc->msk_stat_count = count;
 2150         stat_sz = count * sizeof(struct msk_stat_desc);
 2151         error = bus_dma_tag_create(
 2152                     bus_get_dma_tag(sc->msk_dev),       /* parent */
 2153                     MSK_STAT_ALIGN, 0,          /* alignment, boundary */
 2154                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2155                     BUS_SPACE_MAXADDR,          /* highaddr */
 2156                     NULL, NULL,                 /* filter, filterarg */
 2157                     stat_sz,                    /* maxsize */
 2158                     1,                          /* nsegments */
 2159                     stat_sz,                    /* maxsegsize */
 2160                     0,                          /* flags */
 2161                     NULL, NULL,                 /* lockfunc, lockarg */
 2162                     &sc->msk_stat_tag);
 2163         if (error != 0) {
 2164                 device_printf(sc->msk_dev,
 2165                     "failed to create status DMA tag\n");
 2166                 return (error);
 2167         }
 2168 
 2169         /* Allocate DMA'able memory and load the DMA map for status ring. */
 2170         error = bus_dmamem_alloc(sc->msk_stat_tag,
 2171             (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
 2172             BUS_DMA_ZERO, &sc->msk_stat_map);
 2173         if (error != 0) {
 2174                 device_printf(sc->msk_dev,
 2175                     "failed to allocate DMA'able memory for status ring\n");
 2176                 return (error);
 2177         }
 2178 
 2179         ctx.msk_busaddr = 0;
 2180         error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
 2181             sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2182         if (error != 0) {
 2183                 device_printf(sc->msk_dev,
 2184                     "failed to load DMA'able memory for status ring\n");
 2185                 return (error);
 2186         }
 2187         sc->msk_stat_ring_paddr = ctx.msk_busaddr;
 2188 
 2189         return (0);
 2190 }
 2191 
 2192 static void
 2193 msk_status_dma_free(struct msk_softc *sc)
 2194 {
 2195 
 2196         /* Destroy status block. */
 2197         if (sc->msk_stat_tag) {
 2198                 if (sc->msk_stat_ring_paddr) {
 2199                         bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
 2200                         sc->msk_stat_ring_paddr = 0;
 2201                 }
 2202                 if (sc->msk_stat_ring) {
 2203                         bus_dmamem_free(sc->msk_stat_tag,
 2204                             sc->msk_stat_ring, sc->msk_stat_map);
 2205                         sc->msk_stat_ring = NULL;
 2206                 }
 2207                 bus_dma_tag_destroy(sc->msk_stat_tag);
 2208                 sc->msk_stat_tag = NULL;
 2209         }
 2210 }
 2211 
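      /*
       * Allocate per-port DMA resources: a parent tag, tags and
       * DMA'able memory for the Tx/Rx descriptor rings, and per-buffer
       * tags and maps for Tx/Rx mbufs.
       */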
 2212 static int
 2213 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
 2214 {
 2215         struct msk_dmamap_arg ctx;
 2216         struct msk_txdesc *txd;
 2217         struct msk_rxdesc *rxd;
 2218         bus_size_t rxalign;
 2219         int error, i;
 2220 
 2221         /* Create parent DMA tag. */
 2222         error = bus_dma_tag_create(
 2223                     bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
 2224                     1, 0,                       /* alignment, boundary */
 2225                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2226                     BUS_SPACE_MAXADDR,          /* highaddr */
 2227                     NULL, NULL,                 /* filter, filterarg */
 2228                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 2229                     0,                          /* nsegments */
 2230                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 2231                     0,                          /* flags */
 2232                     NULL, NULL,                 /* lockfunc, lockarg */
 2233                     &sc_if->msk_cdata.msk_parent_tag);
 2234         if (error != 0) {
 2235                 device_printf(sc_if->msk_if_dev,
 2236                     "failed to create parent DMA tag\n");
 2237                 goto fail;
 2238         }
 2239         /* Create tag for Tx ring. */
 2240         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2241                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2242                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2243                     BUS_SPACE_MAXADDR,          /* highaddr */
 2244                     NULL, NULL,                 /* filter, filterarg */
 2245                     MSK_TX_RING_SZ,             /* maxsize */
 2246                     1,                          /* nsegments */
 2247                     MSK_TX_RING_SZ,             /* maxsegsize */
 2248                     0,                          /* flags */
 2249                     NULL, NULL,                 /* lockfunc, lockarg */
 2250                     &sc_if->msk_cdata.msk_tx_ring_tag);
 2251         if (error != 0) {
 2252                 device_printf(sc_if->msk_if_dev,
 2253                     "failed to create Tx ring DMA tag\n");
 2254                 goto fail;
 2255         }
 2256 
 2257         /* Create tag for Rx ring. */
 2258         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2259                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2260                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2261                     BUS_SPACE_MAXADDR,          /* highaddr */
 2262                     NULL, NULL,                 /* filter, filterarg */
 2263                     MSK_RX_RING_SZ,             /* maxsize */
 2264                     1,                          /* nsegments */
 2265                     MSK_RX_RING_SZ,             /* maxsegsize */
 2266                     0,                          /* flags */
 2267                     NULL, NULL,                 /* lockfunc, lockarg */
 2268                     &sc_if->msk_cdata.msk_rx_ring_tag);
 2269         if (error != 0) {
 2270                 device_printf(sc_if->msk_if_dev,
 2271                     "failed to create Rx ring DMA tag\n");
 2272                 goto fail;
 2273         }
 2274 
 2275         /* Create tag for Tx buffers. */
 2276         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2277                     1, 0,                       /* alignment, boundary */
 2278                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2279                     BUS_SPACE_MAXADDR,          /* highaddr */
 2280                     NULL, NULL,                 /* filter, filterarg */
 2281                     MSK_TSO_MAXSIZE,            /* maxsize */
 2282                     MSK_MAXTXSEGS,              /* nsegments */
 2283                     MSK_TSO_MAXSGSIZE,          /* maxsegsize */
 2284                     0,                          /* flags */
 2285                     NULL, NULL,                 /* lockfunc, lockarg */
 2286                     &sc_if->msk_cdata.msk_tx_tag);
 2287         if (error != 0) {
 2288                 device_printf(sc_if->msk_if_dev,
 2289                     "failed to create Tx DMA tag\n");
 2290                 goto fail;
 2291         }
 2292 
 2293         rxalign = 1;
 2294         /*
 2295          * Work around a hardware hang which seems to happen when the Rx
 2296          * buffer is not aligned on a multiple of the FIFO word (8 bytes).
 2297          */
 2298         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 2299                 rxalign = MSK_RX_BUF_ALIGN;
 2300         /* Create tag for Rx buffers. */
 2301         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2302                     rxalign, 0,                 /* alignment, boundary */
 2303                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2304                     BUS_SPACE_MAXADDR,          /* highaddr */
 2305                     NULL, NULL,                 /* filter, filterarg */
 2306                     MCLBYTES,                   /* maxsize */
 2307                     1,                          /* nsegments */
 2308                     MCLBYTES,                   /* maxsegsize */
 2309                     0,                          /* flags */
 2310                     NULL, NULL,                 /* lockfunc, lockarg */
 2311                     &sc_if->msk_cdata.msk_rx_tag);
 2312         if (error != 0) {
 2313                 device_printf(sc_if->msk_if_dev,
 2314                     "failed to create Rx DMA tag\n");
 2315                 goto fail;
 2316         }
 2317 
 2318         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
 2319         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
 2320             (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
 2321             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
 2322         if (error != 0) {
 2323                 device_printf(sc_if->msk_if_dev,
 2324                     "failed to allocate DMA'able memory for Tx ring\n");
 2325                 goto fail;
 2326         }
 2327 
 2328         ctx.msk_busaddr = 0;
 2329         error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
 2330             sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
 2331             MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2332         if (error != 0) {
 2333                 device_printf(sc_if->msk_if_dev,
 2334                     "failed to load DMA'able memory for Tx ring\n");
 2335                 goto fail;
 2336         }
 2337         sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
 2338 
 2339         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
 2340         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
 2341             (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
 2342             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
 2343         if (error != 0) {
 2344                 device_printf(sc_if->msk_if_dev,
 2345                     "failed to allocate DMA'able memory for Rx ring\n");
 2346                 goto fail;
 2347         }
 2348 
 2349         ctx.msk_busaddr = 0;
 2350         error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
 2351             sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
 2352             MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2353         if (error != 0) {
 2354                 device_printf(sc_if->msk_if_dev,
 2355                     "failed to load DMA'able memory for Rx ring\n");
 2356                 goto fail;
 2357         }
 2358         sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
 2359 
 2360         /* Create DMA maps for Tx buffers. */
 2361         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2362                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 2363                 txd->tx_m = NULL;
 2364                 txd->tx_dmamap = NULL;
 2365                 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
 2366                     &txd->tx_dmamap);
 2367                 if (error != 0) {
 2368                         device_printf(sc_if->msk_if_dev,
 2369                             "failed to create Tx dmamap\n");
 2370                         goto fail;
 2371                 }
 2372         }
 2373         /* Create DMA maps for Rx buffers. */
 2374         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
 2375             &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
 2376                 device_printf(sc_if->msk_if_dev,
 2377                     "failed to create spare Rx dmamap\n");
 2378                 goto fail;
 2379         }
 2380         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2381                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2382                 rxd->rx_m = NULL;
 2383                 rxd->rx_dmamap = NULL;
 2384                 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
 2385                     &rxd->rx_dmamap);
 2386                 if (error != 0) {
 2387                         device_printf(sc_if->msk_if_dev,
 2388                             "failed to create Rx dmamap\n");
 2389                         goto fail;
 2390                 }
 2391         }
 2392 
 2393 fail:
 2394         return (error);
 2395 }
 2396 
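      /*
       * Allocate jumbo Rx DMA resources.  Failure here is not fatal;
       * jumbo frame support is simply disabled.
       */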
 2397 static int
 2398 msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
 2399 {
 2400         struct msk_dmamap_arg ctx;
 2401         struct msk_rxdesc *jrxd;
 2402         bus_size_t rxalign;
 2403         int error, i;
 2404 
 2405         if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
 2406                 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
 2407                 device_printf(sc_if->msk_if_dev,
 2408                     "disabling jumbo frame support\n");
 2409                 return (0);
 2410         }
 2411         /* Create tag for jumbo Rx ring. */
 2412         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2413                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2414                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2415                     BUS_SPACE_MAXADDR,          /* highaddr */
 2416                     NULL, NULL,                 /* filter, filterarg */
 2417                     MSK_JUMBO_RX_RING_SZ,       /* maxsize */
 2418                     1,                          /* nsegments */
 2419                     MSK_JUMBO_RX_RING_SZ,       /* maxsegsize */
 2420                     0,                          /* flags */
 2421                     NULL, NULL,                 /* lockfunc, lockarg */
 2422                     &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2423         if (error != 0) {
 2424                 device_printf(sc_if->msk_if_dev,
 2425                     "failed to create jumbo Rx ring DMA tag\n");
 2426                 goto jumbo_fail;
 2427         }
 2428 
 2429         rxalign = 1;
 2430         /*
 2431          * Work around a hardware hang which seems to happen when the Rx
 2432          * buffer is not aligned on a multiple of the FIFO word (8 bytes).
 2433          */
 2434         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 2435                 rxalign = MSK_RX_BUF_ALIGN;
 2436         /* Create tag for jumbo Rx buffers. */
 2437         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2438                     rxalign, 0,                 /* alignment, boundary */
 2439                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2440                     BUS_SPACE_MAXADDR,          /* highaddr */
 2441                     NULL, NULL,                 /* filter, filterarg */
 2442                     MJUM9BYTES,                 /* maxsize */
 2443                     1,                          /* nsegments */
 2444                     MJUM9BYTES,                 /* maxsegsize */
 2445                     0,                          /* flags */
 2446                     NULL, NULL,                 /* lockfunc, lockarg */
 2447                     &sc_if->msk_cdata.msk_jumbo_rx_tag);
 2448         if (error != 0) {
 2449                 device_printf(sc_if->msk_if_dev,
 2450                     "failed to create jumbo Rx DMA tag\n");
 2451                 goto jumbo_fail;
 2452         }
 2453 
 2454         /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
 2455         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2456             (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
 2457             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
 2458             &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2459         if (error != 0) {
 2460                 device_printf(sc_if->msk_if_dev,
 2461                     "failed to allocate DMA'able memory for jumbo Rx ring\n");
 2462                 goto jumbo_fail;
 2463         }
 2464 
 2465         ctx.msk_busaddr = 0;
 2466         error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2467             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 2468             sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
 2469             msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2470         if (error != 0) {
 2471                 device_printf(sc_if->msk_if_dev,
 2472                     "failed to load DMA'able memory for jumbo Rx ring\n");
 2473                 goto jumbo_fail;
 2474         }
 2475         sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
 2476 
 2477         /* Create DMA maps for jumbo Rx buffers. */
 2478         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2479             &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
 2480                 device_printf(sc_if->msk_if_dev,
 2481                     "failed to create spare jumbo Rx dmamap\n");
 2482                 goto jumbo_fail;
 2483         }
 2484         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2485                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2486                 jrxd->rx_m = NULL;
 2487                 jrxd->rx_dmamap = NULL;
 2488                 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2489                     &jrxd->rx_dmamap);
 2490                 if (error != 0) {
 2491                         device_printf(sc_if->msk_if_dev,
 2492                             "failed to create jumbo Rx dmamap\n");
 2493                         goto jumbo_fail;
 2494                 }
 2495         }
 2496 
 2497         return (0);
 2498 
 2499 jumbo_fail:
 2500         msk_rx_dma_jfree(sc_if);
 2501         device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
 2502             "due to resource shortage\n");
 2503         sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
 2504         return (error);
 2505 }
 2506 
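      /*
       * Tear down the Tx/Rx DMA resources in the reverse order of their
       * creation: unload and free the ring memory, destroy the per-buffer
       * DMA maps and their tags, and destroy the parent tag last.  Every
       * step is guarded, so this is safe to call on a partially
       * initialized softc.
       */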
 2507 static void
 2508 msk_txrx_dma_free(struct msk_if_softc *sc_if)
 2509 {
 2510         struct msk_txdesc *txd;
 2511         struct msk_rxdesc *rxd;
 2512         int i;
 2513 
 2514         /* Tx ring. */
 2515         if (sc_if->msk_cdata.msk_tx_ring_tag) {
 2516                 if (sc_if->msk_rdata.msk_tx_ring_paddr)
 2517                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
 2518                             sc_if->msk_cdata.msk_tx_ring_map);
 2519                 if (sc_if->msk_rdata.msk_tx_ring)
 2520                         bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
 2521                             sc_if->msk_rdata.msk_tx_ring,
 2522                             sc_if->msk_cdata.msk_tx_ring_map);
 2523                 sc_if->msk_rdata.msk_tx_ring = NULL;
 2524                 sc_if->msk_rdata.msk_tx_ring_paddr = 0;
 2525                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
 2526                 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
 2527         }
 2528         /* Rx ring. */
 2529         if (sc_if->msk_cdata.msk_rx_ring_tag) {
 2530                 if (sc_if->msk_rdata.msk_rx_ring_paddr)
 2531                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
 2532                             sc_if->msk_cdata.msk_rx_ring_map);
 2533                 if (sc_if->msk_rdata.msk_rx_ring)
 2534                         bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
 2535                             sc_if->msk_rdata.msk_rx_ring,
 2536                             sc_if->msk_cdata.msk_rx_ring_map);
 2537                 sc_if->msk_rdata.msk_rx_ring = NULL;
 2538                 sc_if->msk_rdata.msk_rx_ring_paddr = 0;
 2539                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
 2540                 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
 2541         }
 2542         /* Tx buffers. */
 2543         if (sc_if->msk_cdata.msk_tx_tag) {
 2544                 for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2545                         txd = &sc_if->msk_cdata.msk_txdesc[i];
 2546                         if (txd->tx_dmamap) {
 2547                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
 2548                                     txd->tx_dmamap);
 2549                                 txd->tx_dmamap = NULL;
 2550                         }
 2551                 }
 2552                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
 2553                 sc_if->msk_cdata.msk_tx_tag = NULL;
 2554         }
 2555         /* Rx buffers. */
 2556         if (sc_if->msk_cdata.msk_rx_tag) {
 2557                 for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2558                         rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2559                         if (rxd->rx_dmamap) {
 2560                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2561                                     rxd->rx_dmamap);
 2562                                 rxd->rx_dmamap = NULL;
 2563                         }
 2564                 }
 2565                 if (sc_if->msk_cdata.msk_rx_sparemap) {
 2566                         bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2567                             sc_if->msk_cdata.msk_rx_sparemap);
 2568                         sc_if->msk_cdata.msk_rx_sparemap = 0;
 2569                 }
 2570                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
 2571                 sc_if->msk_cdata.msk_rx_tag = NULL;
 2572         }
 2573         if (sc_if->msk_cdata.msk_parent_tag) {
 2574                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
 2575                 sc_if->msk_cdata.msk_parent_tag = NULL;
 2576         }
 2577 }
 2578 
 2579 static void
 2580 msk_rx_dma_jfree(struct msk_if_softc *sc_if)
 2581 {
 2582         struct msk_rxdesc *jrxd;
 2583         int i;
 2584 
 2585         /* Jumbo Rx ring. */
 2586         if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
 2587                 if (sc_if->msk_rdata.msk_jumbo_rx_ring_paddr)
 2588                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2589                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2590                 if (sc_if->msk_rdata.msk_jumbo_rx_ring)
 2591                         bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2592                             sc_if->msk_rdata.msk_jumbo_rx_ring,
 2593                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2594                 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
 2595                 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = 0;
 2596                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2597                 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
 2598         }
 2599         /* Jumbo Rx buffers. */
 2600         if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
 2601                 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2602                         jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2603                         if (jrxd->rx_dmamap) {
 2604                                 bus_dmamap_destroy(
 2605                                     sc_if->msk_cdata.msk_jumbo_rx_tag,
 2606                                     jrxd->rx_dmamap);
 2607                                 jrxd->rx_dmamap = NULL;
 2608                         }
 2609                 }
 2610                 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
 2611                         bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
 2612                             sc_if->msk_cdata.msk_jumbo_rx_sparemap);
 2613                         sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
 2614                 }
 2615                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
 2616                 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
 2617         }
 2618 }
 2619 
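      /*
       * Build the list-element (LE) chain for one outgoing frame: optional
       * control LEs first (TSO MSS, VLAN tag, checksum parameters, 64-bit
       * address prefixes), then one buffer LE per DMA segment.  On success
       * the driver owns the mbuf chain; on failure it is freed and *m_head
       * cleared, except for plain mapping errors, where the caller keeps
       * the mbuf and may retry.
       */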
 2620 static int
 2621 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
 2622 {
 2623         struct msk_txdesc *txd, *txd_last;
 2624         struct msk_tx_desc *tx_le;
 2625         struct mbuf *m;
 2626         bus_dmamap_t map;
 2627         bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
 2628         uint32_t control, csum, prod, si;
 2629         uint16_t offset, tcp_offset, tso_mtu;
 2630         int error, i, nseg, tso;
 2631 
 2632         MSK_IF_LOCK_ASSERT(sc_if);
 2633 
 2634         tcp_offset = offset = 0;
 2635         m = *m_head;
 2636         if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
 2637             (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
 2638             ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 2639             (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
 2640                 /*
 2641                  * Since the mbuf carries no protocol-specific structure
 2642                  * information, we have to inspect the headers here to set
 2643                  * up TSO and checksum offload. It is unclear why Marvell
 2644                  * made such a decision in the chip design, because other
 2645                  * GigE hardware normally takes care of all these chores
 2646                  * itself. However, the TSO performance of the Yukon II is
 2647                  * good enough to be worth implementing.
 2648                  */
 2649                 struct ether_header *eh;
 2650                 struct ip *ip;
 2651                 struct tcphdr *tcp;
 2652 
 2653                 if (M_WRITABLE(m) == 0) {
 2654                         /* Get a writable copy. */
 2655                         m = m_dup(*m_head, M_NOWAIT);
 2656                         m_freem(*m_head);
 2657                         if (m == NULL) {
 2658                                 *m_head = NULL;
 2659                                 return (ENOBUFS);
 2660                         }
 2661                         *m_head = m;
 2662                 }
 2663 
 2664                 offset = sizeof(struct ether_header);
 2665                 m = m_pullup(m, offset);
 2666                 if (m == NULL) {
 2667                         *m_head = NULL;
 2668                         return (ENOBUFS);
 2669                 }
 2670                 eh = mtod(m, struct ether_header *);
 2671                 /* Check if hardware VLAN insertion is off. */
 2672                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 2673                         offset = sizeof(struct ether_vlan_header);
 2674                         m = m_pullup(m, offset);
 2675                         if (m == NULL) {
 2676                                 *m_head = NULL;
 2677                                 return (ENOBUFS);
 2678                         }
 2679                 }
 2680                 m = m_pullup(m, offset + sizeof(struct ip));
 2681                 if (m == NULL) {
 2682                         *m_head = NULL;
 2683                         return (ENOBUFS);
 2684                 }
 2685                 ip = (struct ip *)(mtod(m, char *) + offset);
 2686                 offset += (ip->ip_hl << 2);
 2687                 tcp_offset = offset;
 2688                 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2689                         m = m_pullup(m, offset + sizeof(struct tcphdr));
 2690                         if (m == NULL) {
 2691                                 *m_head = NULL;
 2692                                 return (ENOBUFS);
 2693                         }
 2694                         tcp = (struct tcphdr *)(mtod(m, char *) + offset);
 2695                         offset += (tcp->th_off << 2);
 2696                 } else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
 2697                     (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
 2698                     (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
 2699                         /*
 2700                          * The Yukon II seems to have a Tx checksum offload
 2701                          * bug for TCP packets smaller than 60 bytes, e.g.
 2702                          * TCP window probe packets and pure ACK packets.
 2703                          * The common workaround of padding the frame with
 2704                          * zeros up to the minimum Ethernet frame size did
 2705                          * not work at all.
 2706                          * Instead of disabling checksum offload completely,
 2707                          * we fall back to a software checksum routine when
 2708                          * we encounter short TCP frames.
 2709                          * Short UDP packets appear to be handled correctly
 2710                          * by the Yukon II. This bug presumably does not
 2711                          * affect controllers that use the newer descriptor
 2712                          * format or automatic Tx checksum calculation.
 2713                          */
 2714                         m = m_pullup(m, offset + sizeof(struct tcphdr));
 2715                         if (m == NULL) {
 2716                                 *m_head = NULL;
 2717                                 return (ENOBUFS);
 2718                         }
 2719                         *(uint16_t *)(m->m_data + offset +
 2720                             m->m_pkthdr.csum_data) = in_cksum_skip(m,
 2721                             m->m_pkthdr.len, offset);
 2722                         m->m_pkthdr.csum_flags &= ~CSUM_TCP;
 2723                 }
 2724                 *m_head = m;
 2725         }
 2726 
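        /*
         * Map the mbuf chain for DMA.  bus_dmamap_load_mbuf_sg() returns
         * EFBIG when the chain needs more than MSK_MAXTXSEGS segments;
         * in that case m_collapse() compacts the chain and the load is
         * retried once.
         */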
 2727         prod = sc_if->msk_cdata.msk_tx_prod;
 2728         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2729         txd_last = txd;
 2730         map = txd->tx_dmamap;
 2731         error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
 2732             *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
 2733         if (error == EFBIG) {
 2734                 m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS);
 2735                 if (m == NULL) {
 2736                         m_freem(*m_head);
 2737                         *m_head = NULL;
 2738                         return (ENOBUFS);
 2739                 }
 2740                 *m_head = m;
 2741                 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
 2742                     map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
 2743                 if (error != 0) {
 2744                         m_freem(*m_head);
 2745                         *m_head = NULL;
 2746                         return (error);
 2747                 }
 2748         } else if (error != 0)
 2749                 return (error);
 2750         if (nseg == 0) {
 2751                 m_freem(*m_head);
 2752                 *m_head = NULL;
 2753                 return (EIO);
 2754         }
 2755 
 2756         /* Check number of available descriptors. */
 2757         if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
 2758             (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
 2759                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
 2760                 return (ENOBUFS);
 2761         }
 2762 
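        /*
         * Emit the optional control LEs that must precede the buffer LEs.
         * The TSO MSS and the checksum parameters are cached in the softc,
         * so a control LE is only written when the value changes.
         */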
 2763         control = 0;
 2764         tso = 0;
 2765         tx_le = NULL;
 2766 
 2767         /* Check TSO support. */
 2768         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2769                 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
 2770                         tso_mtu = m->m_pkthdr.tso_segsz;
 2771                 else
 2772                         tso_mtu = offset + m->m_pkthdr.tso_segsz;
 2773                 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
 2774                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2775                         tx_le->msk_addr = htole32(tso_mtu);
 2776                         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
 2777                                 tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
 2778                         else
 2779                                 tx_le->msk_control =
 2780                                     htole32(OP_LRGLEN | HW_OWNER);
 2781                         sc_if->msk_cdata.msk_tx_cnt++;
 2782                         MSK_INC(prod, MSK_TX_RING_CNT);
 2783                         sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
 2784                 }
 2785                 tso++;
 2786         }
 2787         /* Check if we have a VLAN tag to insert. */
 2788         if ((m->m_flags & M_VLANTAG) != 0) {
 2789                 if (tx_le == NULL) {
 2790                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2791                         tx_le->msk_addr = htole32(0);
 2792                         tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
 2793                             htons(m->m_pkthdr.ether_vtag));
 2794                         sc_if->msk_cdata.msk_tx_cnt++;
 2795                         MSK_INC(prod, MSK_TX_RING_CNT);
 2796                 } else {
 2797                         tx_le->msk_control |= htole32(OP_VLAN |
 2798                             htons(m->m_pkthdr.ether_vtag));
 2799                 }
 2800                 control |= INS_VLAN;
 2801         }
 2802         /* Check if we have to handle checksum offload. */
 2803         if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
 2804                 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
 2805                         control |= CALSUM;
 2806                 else {
 2807                         control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
 2808                         if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 2809                                 control |= UDPTCP;
 2810                         /* Checksum write position. */
 2811                         csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
 2812                         /* Checksum start position. */
 2813                         csum |= (uint32_t)tcp_offset << 16;
 2814                         if (csum != sc_if->msk_cdata.msk_last_csum) {
 2815                                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2816                                 tx_le->msk_addr = htole32(csum);
 2817                                 tx_le->msk_control = htole32(1 << 16 |
 2818                                     (OP_TCPLISW | HW_OWNER));
 2819                                 sc_if->msk_cdata.msk_tx_cnt++;
 2820                                 MSK_INC(prod, MSK_TX_RING_CNT);
 2821                                 sc_if->msk_cdata.msk_last_csum = csum;
 2822                         }
 2823                 }
 2824         }
 2825 
 2826 #ifdef MSK_64BIT_DMA
 2827         if (MSK_ADDR_HI(txsegs[0].ds_addr) !=
 2828             sc_if->msk_cdata.msk_tx_high_addr) {
 2829                 sc_if->msk_cdata.msk_tx_high_addr =
 2830                     MSK_ADDR_HI(txsegs[0].ds_addr);
 2831                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2832                 tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr));
 2833                 tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
 2834                 sc_if->msk_cdata.msk_tx_cnt++;
 2835                 MSK_INC(prod, MSK_TX_RING_CNT);
 2836         }
 2837 #endif
 2838         si = prod;
 2839         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2840         tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
 2841         if (tso == 0)
 2842                 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2843                     OP_PACKET);
 2844         else
 2845                 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2846                     OP_LARGESEND);
 2847         sc_if->msk_cdata.msk_tx_cnt++;
 2848         MSK_INC(prod, MSK_TX_RING_CNT);
 2849 
 2850         for (i = 1; i < nseg; i++) {
 2851                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2852 #ifdef MSK_64BIT_DMA
 2853                 if (MSK_ADDR_HI(txsegs[i].ds_addr) !=
 2854                     sc_if->msk_cdata.msk_tx_high_addr) {
 2855                         sc_if->msk_cdata.msk_tx_high_addr =
 2856                             MSK_ADDR_HI(txsegs[i].ds_addr);
 2857                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2858                         tx_le->msk_addr =
 2859                             htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
 2860                         tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
 2861                         sc_if->msk_cdata.msk_tx_cnt++;
 2862                         MSK_INC(prod, MSK_TX_RING_CNT);
 2863                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2864                 }
 2865 #endif
 2866                 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
 2867                 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
 2868                     OP_BUFFER | HW_OWNER);
 2869                 sc_if->msk_cdata.msk_tx_cnt++;
 2870                 MSK_INC(prod, MSK_TX_RING_CNT);
 2871         }
 2872         /* Update producer index. */
 2873         sc_if->msk_cdata.msk_tx_prod = prod;
 2874 
 2875         /* Set EOP on the last descriptor. */
 2876         prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
 2877         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2878         tx_le->msk_control |= htole32(EOP);
 2879 
 2880         /* Finally, give the hardware ownership of the first descriptor. */
 2881         tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
 2882         tx_le->msk_control |= htole32(HW_OWNER);
 2883 
 2884         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2885         map = txd_last->tx_dmamap;
 2886         txd_last->tx_dmamap = txd->tx_dmamap;
 2887         txd->tx_dmamap = map;
 2888         txd->tx_m = m;
 2889 
 2890         /* Sync descriptors. */
 2891         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
 2892         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
 2893             sc_if->msk_cdata.msk_tx_ring_map,
 2894             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2895 
 2896         return (0);
 2897 }
 2898 
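      /*
       * ifnet if_start entry point: take the per-interface lock and hand
       * off to msk_start_locked(), which does the real work and is also
       * called from paths that already hold the lock.
       */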
 2899 static void
 2900 msk_start(struct ifnet *ifp)
 2901 {
 2902         struct msk_if_softc *sc_if;
 2903 
 2904         sc_if = ifp->if_softc;
 2905         MSK_IF_LOCK(sc_if);
 2906         msk_start_locked(ifp);
 2907         MSK_IF_UNLOCK(sc_if);
 2908 }
 2909 
 2910 static void
 2911 msk_start_locked(struct ifnet *ifp)
 2912 {
 2913         struct msk_if_softc *sc_if;
 2914         struct mbuf *m_head;
 2915         int enq;
 2916 
 2917         sc_if = ifp->if_softc;
 2918         MSK_IF_LOCK_ASSERT(sc_if);
 2919 
 2920         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2921             IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
 2922                 return;
 2923 
 2924         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2925             sc_if->msk_cdata.msk_tx_cnt <
 2926             (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
 2927                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2928                 if (m_head == NULL)
 2929                         break;
 2930                 /*
 2931                  * Pack the data into the transmit ring. If we
 2932                  * don't have room, set the OACTIVE flag and wait
 2933                  * for the NIC to drain the ring.
 2934                  */
 2935                 if (msk_encap(sc_if, &m_head) != 0) {
 2936                         if (m_head == NULL)
 2937                                 break;
 2938                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2939                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2940                         break;
 2941                 }
 2942 
 2943                 enq++;
 2944                 /*
 2945                  * If there's a BPF listener, bounce a copy of this frame
 2946                  * to it.
 2947                  */
 2948                 ETHER_BPF_MTAP(ifp, m_head);
 2949         }
 2950 
 2951         if (enq > 0) {
 2952                 /* Transmit */
 2953                 CSR_WRITE_2(sc_if->msk_softc,
 2954                     Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
 2955                     sc_if->msk_cdata.msk_tx_prod);
 2956 
 2957                 /* Set a timeout in case the chip goes out to lunch. */
 2958                 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
 2959         }
 2960 }
 2961 
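      /*
       * Per-interface Tx watchdog, driven from msk_tick().  It fires when
       * msk_watchdog_timer counts down to zero, distinguishes a missed
       * link from a genuine Tx hang, and reinitializes the interface in
       * either case.
       */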
 2962 static void
 2963 msk_watchdog(struct msk_if_softc *sc_if)
 2964 {
 2965         struct ifnet *ifp;
 2966 
 2967         MSK_IF_LOCK_ASSERT(sc_if);
 2968 
 2969         if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
 2970                 return;
 2971         ifp = sc_if->msk_ifp;
 2972         if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
 2973                 if (bootverbose)
 2974                         if_printf(sc_if->msk_ifp, "watchdog timeout "
 2975                            "(missed link)\n");
 2976                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2977                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2978                 msk_init_locked(sc_if);
 2979                 return;
 2980         }
 2981 
 2982         if_printf(ifp, "watchdog timeout\n");
 2983         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2984         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2985         msk_init_locked(sc_if);
 2986         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2987                 msk_start_locked(ifp);
 2988 }
 2989 
 2990 static int
 2991 mskc_shutdown(device_t dev)
 2992 {
 2993         struct msk_softc *sc;
 2994         int i;
 2995 
 2996         sc = device_get_softc(dev);
 2997         MSK_LOCK(sc);
 2998         for (i = 0; i < sc->msk_num_port; i++) {
 2999                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3000                     ((sc->msk_if[i]->msk_ifp->if_drv_flags &
 3001                     IFF_DRV_RUNNING) != 0))
 3002                         msk_stop(sc->msk_if[i]);
 3003         }
 3004         MSK_UNLOCK(sc);
 3005 
 3006         /* Put the hardware into reset. */
 3007         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 3008         return (0);
 3009 }
 3010 
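      /*
       * Suspend: stop all running ports, mask both interrupt blocks
       * (reading the registers back to flush the writes), power down the
       * PHY and hold the chip in reset.
       */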
 3011 static int
 3012 mskc_suspend(device_t dev)
 3013 {
 3014         struct msk_softc *sc;
 3015         int i;
 3016 
 3017         sc = device_get_softc(dev);
 3018 
 3019         MSK_LOCK(sc);
 3020 
 3021         for (i = 0; i < sc->msk_num_port; i++) {
 3022                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3023                     ((sc->msk_if[i]->msk_ifp->if_drv_flags &
 3024                     IFF_DRV_RUNNING) != 0))
 3025                         msk_stop(sc->msk_if[i]);
 3026         }
 3027 
 3028         /* Disable all interrupts. */
 3029         CSR_WRITE_4(sc, B0_IMSK, 0);
 3030         CSR_READ_4(sc, B0_IMSK);
 3031         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 3032         CSR_READ_4(sc, B0_HWE_IMSK);
 3033 
 3034         msk_phy_power(sc, MSK_PHY_POWERDOWN);
 3035 
 3036         /* Put the hardware into reset. */
 3037         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 3038         sc->msk_pflags |= MSK_FLAG_SUSPEND;
 3039 
 3040         MSK_UNLOCK(sc);
 3041 
 3042         return (0);
 3043 }
 3044 
 3045 static int
 3046 mskc_resume(device_t dev)
 3047 {
 3048         struct msk_softc *sc;
 3049         int i;
 3050 
 3051         sc = device_get_softc(dev);
 3052 
 3053         MSK_LOCK(sc);
 3054 
 3055         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
 3056         mskc_reset(sc);
 3057         for (i = 0; i < sc->msk_num_port; i++) {
 3058                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3059                     ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
 3060                         sc->msk_if[i]->msk_ifp->if_drv_flags &=
 3061                             ~IFF_DRV_RUNNING;
 3062                         msk_init_locked(sc->msk_if[i]);
 3063                 }
 3064         }
 3065         sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
 3066 
 3067         MSK_UNLOCK(sc);
 3068 
 3069         return (0);
 3070 }
 3071 
 3072 #ifndef __NO_STRICT_ALIGNMENT
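      /*
       * On strict-alignment machines the 8-byte Rx buffer alignment
       * required by the hardware leaves the IP header misaligned behind
       * the 14-byte Ethernet header.  Copy the frame backwards by
       * MSK_RX_BUF_ALIGN - ETHER_ALIGN bytes, 16 bits at a time (the
       * regions overlap, but the destination lies below the source), so
       * the payload ends up properly aligned.
       */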
 3073 static __inline void
 3074 msk_fixup_rx(struct mbuf *m)
 3075 {
 3076         int i;
 3077         uint16_t *src, *dst;
 3078 
 3079         src = mtod(m, uint16_t *);
 3080         dst = src - 3;
 3081 
 3082         for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
 3083                 *dst++ = *src++;
 3084 
 3085         m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
 3086 }
 3087 #endif
 3088 
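      /*
       * Translate the hardware Rx checksum result into mbuf csum flags.
       * DESCV2 controllers report per-packet status bits directly; older
       * controllers only return raw 16-bit sums, which are sanity-checked
       * and fixed up in software below before being passed to the stack.
       */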
 3089 static __inline void
 3090 msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
 3091 {
 3092         struct ether_header *eh;
 3093         struct ip *ip;
 3094         struct udphdr *uh;
 3095         int32_t hlen, len, pktlen, temp32;
 3096         uint16_t csum, *opts;
 3097 
 3098         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
 3099                 if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
 3100                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 3101                         if ((control & CSS_IPV4_CSUM_OK) != 0)
 3102                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 3103                         if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
 3104                             (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
 3105                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
 3106                                     CSUM_PSEUDO_HDR;
 3107                                 m->m_pkthdr.csum_data = 0xffff;
 3108                         }
 3109                 }
 3110                 return;
 3111         }
 3112         /*
 3113          * Marvell Yukon controllers that support OP_RXCHKS are known
 3114          * to have various Rx checksum offloading bugs. These
 3115          * controllers can be configured to compute a simple checksum
 3116          * at two different positions, so the IP and TCP/UDP checksums
 3117          * could be computed at the same time. Instead, we intentionally
 3118          * have the controller compute the TCP/UDP checksum twice, by
 3119          * specifying the same start position, and compare the results.
 3120          * If they differ, the hardware checksum logic is broken.
 3121          */
 3122         if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
 3123                 if (bootverbose)
 3124                         device_printf(sc_if->msk_if_dev,
 3125                             "Rx checksum value mismatch!\n");
 3126                 return;
 3127         }
 3128         pktlen = m->m_pkthdr.len;
 3129         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 3130                 return;
 3131         eh = mtod(m, struct ether_header *);
 3132         if (eh->ether_type != htons(ETHERTYPE_IP))
 3133                 return;
 3134         ip = (struct ip *)(eh + 1);
 3135         if (ip->ip_v != IPVERSION)
 3136                 return;
 3137 
 3138         hlen = ip->ip_hl << 2;
 3139         pktlen -= sizeof(struct ether_header);
 3140         if (hlen < sizeof(struct ip))
 3141                 return;
 3142         if (ntohs(ip->ip_len) < hlen)
 3143                 return;
 3144         if (ntohs(ip->ip_len) != pktlen)
 3145                 return;
 3146         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 3147                 return; /* can't handle fragmented packet. */
 3148 
 3149         switch (ip->ip_p) {
 3150         case IPPROTO_TCP:
 3151                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 3152                         return;
 3153                 break;
 3154         case IPPROTO_UDP:
 3155                 if (pktlen < (hlen + sizeof(struct udphdr)))
 3156                         return;
 3157                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 3158                 if (uh->uh_sum == 0)
 3159                         return; /* no checksum */
 3160                 break;
 3161         default:
 3162                 return;
 3163         }
 3164         csum = bswap16(sc_if->msk_csum & 0xFFFF);
 3165         /* Checksum fixup for IP options. */
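        /*
         * The hardware sum apparently starts at a fixed offset that
         * assumes a plain 20-byte IP header, so any IP option words it
         * swallowed are subtracted back out in one's-complement
         * arithmetic: subtract, then fold the borrow from the high half
         * back into the low half.
         */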
 3166         len = hlen - sizeof(struct ip);
 3167         if (len > 0) {
 3168                 opts = (uint16_t *)(ip + 1);
 3169                 for (; len > 0; len -= sizeof(uint16_t), opts++) {
 3170                         temp32 = csum - *opts;
 3171                         temp32 = (temp32 >> 16) + (temp32 & 0xffff);
 3172                         csum = temp32 & 0xffff;
 3173                 }
 3174         }
 3175         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 3176         m->m_pkthdr.csum_data = csum;
 3177 }
 3178 
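      /*
       * Process one received frame on the standard Rx ring.  The status
       * LE supplies the length and error bits; msk_newbuf() replaces the
       * buffer and, if that fails, the old buffer is recycled and the
       * frame counted as an input-queue drop.  The do {} while (0)
       * construct lets error paths break out while still advancing the
       * ring indices at the bottom.
       */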
 3179 static void
 3180 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
 3181     int len)
 3182 {
 3183         struct mbuf *m;
 3184         struct ifnet *ifp;
 3185         struct msk_rxdesc *rxd;
 3186         int cons, rxlen;
 3187 
 3188         ifp = sc_if->msk_ifp;
 3189 
 3190         MSK_IF_LOCK_ASSERT(sc_if);
 3191 
 3192         cons = sc_if->msk_cdata.msk_rx_cons;
 3193         do {
 3194                 rxlen = status >> 16;
 3195                 if ((status & GMR_FS_VLAN) != 0 &&
 3196                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3197                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 3198                 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
 3199                         /*
 3200                          * For controllers that return a bogus status code,
 3201                          * just do a minimal check and let the upper stack
 3202                          * handle the frame.
 3203                          */
 3204                         if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
 3205                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 3206                                 msk_discard_rxbuf(sc_if, cons);
 3207                                 break;
 3208                         }
 3209                 } else if (len > sc_if->msk_framesize ||
 3210                     ((status & GMR_FS_ANY_ERR) != 0) ||
 3211                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 3212                         /* Don't count flow-control packets as errors. */
 3213                         if ((status & GMR_FS_GOOD_FC) == 0)
 3214                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 3215                         msk_discard_rxbuf(sc_if, cons);
 3216                         break;
 3217                 }
 3218 #ifdef MSK_64BIT_DMA
 3219                 rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) %
 3220                     MSK_RX_RING_CNT];
 3221 #else
 3222                 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
 3223 #endif
 3224                 m = rxd->rx_m;
 3225                 if (msk_newbuf(sc_if, cons) != 0) {
 3226                         if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 3227                         /* Reuse old buffer. */
 3228                         msk_discard_rxbuf(sc_if, cons);
 3229                         break;
 3230                 }
 3231                 m->m_pkthdr.rcvif = ifp;
 3232                 m->m_pkthdr.len = m->m_len = len;
 3233 #ifndef __NO_STRICT_ALIGNMENT
 3234                 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 3235                         msk_fixup_rx(m);
 3236 #endif
 3237                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 3238                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 3239                         msk_rxcsum(sc_if, control, m);
 3240                 /* Check for VLAN tagged packets. */
 3241                 if ((status & GMR_FS_VLAN) != 0 &&
 3242                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 3243                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 3244                         m->m_flags |= M_VLANTAG;
 3245                 }
 3246                 MSK_IF_UNLOCK(sc_if);
 3247                 (*ifp->if_input)(ifp, m);
 3248                 MSK_IF_LOCK(sc_if);
 3249         } while (0);
 3250 
 3251         MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
 3252         MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
 3253 }
 3254 
 3255 static void
 3256 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
 3257     int len)
 3258 {
 3259         struct mbuf *m;
 3260         struct ifnet *ifp;
 3261         struct msk_rxdesc *jrxd;
 3262         int cons, rxlen;
 3263 
 3264         ifp = sc_if->msk_ifp;
 3265 
 3266         MSK_IF_LOCK_ASSERT(sc_if);
 3267 
 3268         cons = sc_if->msk_cdata.msk_rx_cons;
 3269         do {
 3270                 rxlen = status >> 16;
 3271                 if ((status & GMR_FS_VLAN) != 0 &&
 3272                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3273                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 3274                 if (len > sc_if->msk_framesize ||
 3275                     ((status & GMR_FS_ANY_ERR) != 0) ||
 3276                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 3277                         /* Don't count flow-control packets as errors. */
 3278                         if ((status & GMR_FS_GOOD_FC) == 0)
 3279                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 3280                         msk_discard_jumbo_rxbuf(sc_if, cons);
 3281                         break;
 3282                 }
 3283 #ifdef MSK_64BIT_DMA
 3284                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) %
 3285                     MSK_JUMBO_RX_RING_CNT];
 3286 #else
 3287                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
 3288 #endif
 3289                 m = jrxd->rx_m;
 3290                 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
 3291                         if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 3292                         /* Reuse old buffer. */
 3293                         msk_discard_jumbo_rxbuf(sc_if, cons);
 3294                         break;
 3295                 }
 3296                 m->m_pkthdr.rcvif = ifp;
 3297                 m->m_pkthdr.len = m->m_len = len;
 3298 #ifndef __NO_STRICT_ALIGNMENT
 3299                 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 3300                         msk_fixup_rx(m);
 3301 #endif
 3302                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 3303                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 3304                         msk_rxcsum(sc_if, control, m);
 3305                 /* Check for VLAN tagged packets. */
 3306                 if ((status & GMR_FS_VLAN) != 0 &&
 3307                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 3308                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 3309                         m->m_flags |= M_VLANTAG;
 3310                 }
 3311                 MSK_IF_UNLOCK(sc_if);
 3312                 (*ifp->if_input)(ifp, m);
 3313                 MSK_IF_LOCK(sc_if);
 3314         } while (0);
 3315 
 3316         MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
 3317         MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
 3318 }
 3319 
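      /*
       * Reclaim completed Tx descriptors up to the index reported by the
       * status LE.  Only EOP descriptors carry an mbuf to unload and
       * free; the watchdog is disarmed once the ring drains completely.
       */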
 3320 static void
 3321 msk_txeof(struct msk_if_softc *sc_if, int idx)
 3322 {
 3323         struct msk_txdesc *txd;
 3324         struct msk_tx_desc *cur_tx;
 3325         struct ifnet *ifp;
 3326         uint32_t control;
 3327         int cons, prog;
 3328 
 3329         MSK_IF_LOCK_ASSERT(sc_if);
 3330 
 3331         ifp = sc_if->msk_ifp;
 3332 
 3333         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
 3334             sc_if->msk_cdata.msk_tx_ring_map,
 3335             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3336         /*
 3337          * Go through our tx ring and free mbufs for those
 3338          * frames that have been sent.
 3339          */
 3340         cons = sc_if->msk_cdata.msk_tx_cons;
 3341         prog = 0;
 3342         for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
 3343                 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
 3344                         break;
 3345                 prog++;
 3346                 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
 3347                 control = le32toh(cur_tx->msk_control);
 3348                 sc_if->msk_cdata.msk_tx_cnt--;
 3349                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3350                 if ((control & EOP) == 0)
 3351                         continue;
 3352                 txd = &sc_if->msk_cdata.msk_txdesc[cons];
 3353                 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
 3354                     BUS_DMASYNC_POSTWRITE);
 3355                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
 3356 
 3357                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 3358                 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
 3359                     __func__));
 3360                 m_freem(txd->tx_m);
 3361                 txd->tx_m = NULL;
 3362         }
 3363 
 3364         if (prog > 0) {
 3365                 sc_if->msk_cdata.msk_tx_cons = cons;
 3366                 if (sc_if->msk_cdata.msk_tx_cnt == 0)
 3367                         sc_if->msk_watchdog_timer = 0;
 3368                 /* No need to sync LEs since we didn't update any. */
 3369         }
 3370 }
 3371 
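      /*
       * Per-interface 1 Hz callout: drive the MII state machine, catch a
       * link-state change the interrupt path may have missed, drain
       * pending status events and run the Tx watchdog.
       */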
 3372 static void
 3373 msk_tick(void *xsc_if)
 3374 {
 3375         struct msk_if_softc *sc_if;
 3376         struct mii_data *mii;
 3377 
 3378         sc_if = xsc_if;
 3379 
 3380         MSK_IF_LOCK_ASSERT(sc_if);
 3381 
 3382         mii = device_get_softc(sc_if->msk_miibus);
 3383 
 3384         mii_tick(mii);
 3385         if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
 3386                 msk_miibus_statchg(sc_if->msk_if_dev);
 3387         msk_handle_events(sc_if->msk_softc);
 3388         msk_watchdog(sc_if);
 3389         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 3390 }
 3391 
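      /*
       * PHY interrupt handler.  The interrupt status register is read
       * twice and the first value discarded, presumably to make sure the
       * latched interrupt is fully acknowledged before the status is
       * inspected.
       */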
 3392 static void
 3393 msk_intr_phy(struct msk_if_softc *sc_if)
 3394 {
 3395         uint16_t status;
 3396 
 3397         msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 3398         status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 3399         /* Handle FIFO Underrun/Overflow? */
 3400         if ((status & PHY_M_IS_FIFO_ERROR))
 3401                 device_printf(sc_if->msk_if_dev,
 3402                     "PHY FIFO underrun/overflow.\n");
 3403 }
 3404 
 3405 static void
 3406 msk_intr_gmac(struct msk_if_softc *sc_if)
 3407 {
 3408         struct msk_softc *sc;
 3409         uint8_t status;
 3410 
 3411         sc = sc_if->msk_softc;
 3412         status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3413 
 3414         /* GMAC Rx FIFO overrun. */
 3415         if ((status & GM_IS_RX_FF_OR) != 0)
 3416                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 3417                     GMF_CLI_RX_FO);
 3418         /* GMAC Tx FIFO underrun. */
 3419         if ((status & GM_IS_TX_FF_UR) != 0) {
 3420                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3421                     GMF_CLI_TX_FU);
 3422                 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
 3423                 /*
 3424                  * XXX
 3425                  * In case of a Tx underrun we may need to flush/reset
 3426                  * the Tx MAC, but that would also require
 3427                  * resynchronization with the status LEs. Reinitializing
 3428                  * the status LEs would affect the other port in a dual
 3429                  * MAC configuration, so it should be avoided as much as
 3430                  * possible. Due to the lack of documentation this is all
 3431                  * vague guesswork and needs more investigation.
 3432                  */
 3433         }
 3434 }
 3435 
 3436 static void
 3437 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
 3438 {
 3439         struct msk_softc *sc;
 3440 
 3441         sc = sc_if->msk_softc;
 3442         if ((status & Y2_IS_PAR_RD1) != 0) {
 3443                 device_printf(sc_if->msk_if_dev,
 3444                     "RAM buffer read parity error\n");
 3445                 /* Clear IRQ. */
 3446                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3447                     RI_CLR_RD_PERR);
 3448         }
 3449         if ((status & Y2_IS_PAR_WR1) != 0) {
 3450                 device_printf(sc_if->msk_if_dev,
 3451                     "RAM buffer write parity error\n");
 3452                 /* Clear IRQ. */
 3453                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3454                     RI_CLR_WR_PERR);
 3455         }
 3456         if ((status & Y2_IS_PAR_MAC1) != 0) {
 3457                 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
 3458                 /* Clear IRQ. */
 3459                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3460                     GMF_CLI_TX_PE);
 3461         }
 3462         if ((status & Y2_IS_PAR_RX1) != 0) {
 3463                 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
 3464                 /* Clear IRQ. */
 3465                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
 3466         }
 3467         if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
 3468                 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
 3469                 /* Clear IRQ. */
 3470                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
 3471         }
 3472 }
 3473 
 3474 static void
 3475 msk_intr_hwerr(struct msk_softc *sc)
 3476 {
 3477         uint32_t status;
 3478         uint32_t tlphead[4];
 3479 
 3480         status = CSR_READ_4(sc, B0_HWE_ISRC);
 3481         /* Time Stamp timer overflow. */
 3482         if ((status & Y2_IS_TIST_OV) != 0)
 3483                 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 3484         if ((status & Y2_IS_PCI_NEXP) != 0) {
 3485                 /*
 3486                  * A PCI Express error occurred that is not described in
 3487                  * the PEX spec.
 3488                  * The error is also mapped to either the Master Abort
 3489                  * (Y2_IS_MST_ERR) or the Target Abort (Y2_IS_IRQ_STAT)
 3490                  * bit and can only be cleared there.
 3491                  */
 3492                 device_printf(sc->msk_dev,
 3493                     "PCI Express protocol violation error\n");
 3494         }
 3495 
 3496         if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
 3497                 uint16_t v16;
 3498 
 3499                 if ((status & Y2_IS_MST_ERR) != 0)
 3500                         device_printf(sc->msk_dev,
 3501                             "unexpected IRQ Master error\n");
 3502                 else
 3503                         device_printf(sc->msk_dev,
 3504                             "unexpected IRQ Status error\n");
 3505                 /* Reset all bits in the PCI status register. */
 3506                 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 3507                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3508                 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
 3509                     PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 3510                     PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
 3511                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3512         }
 3513 
 3514         /* Check for PCI Express Uncorrectable Error. */
 3515         if ((status & Y2_IS_PCI_EXP) != 0) {
 3516                 uint32_t v32;
 3517 
 3518                 /*
 3519                  * On a PCI Express bus, bridges are called root complexes
 3520                  * (RC). PCI Express errors are recognized by the root
 3521                  * complex as well, which asks the system to handle the
 3522                  * problem. After an error occurs it may be that the
 3523                  * adapter can no longer be accessed.
 3524                  */
 3525 
 3526                 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 3527                 if ((v32 & PEX_UNSUP_REQ) != 0) {
 3528                         /* Unsupported request errors are logged but otherwise ignored. */
 3529                         device_printf(sc->msk_dev,
 3530                             "Uncorrectable PCI Express error\n");
 3531                 }
 3532                 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
 3533                         int i;
 3534 
 3535                         /* Get the TLP header from the Log Registers. */
 3536                         for (i = 0; i < 4; i++)
 3537                                 tlphead[i] = CSR_PCI_READ_4(sc,
 3538                                     PEX_HEADER_LOG + i * 4);
 3539                         /* Check for vendor defined broadcast message. */
 3540                         if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
 3541                                 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 3542                                 CSR_WRITE_4(sc, B0_HWE_IMSK,
 3543                                     sc->msk_intrhwemask);
 3544                                 CSR_READ_4(sc, B0_HWE_IMSK);
 3545                         }
 3546                 }
 3547                 /* Clear the interrupt. */
 3548                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3549                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 3550                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3551         }
 3552 
 3553         if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
 3554                 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
 3555         if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
 3556                 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
 3557 }
 3558 
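      /*
       * Sync whichever Rx ring is active and hand the new producer index
       * to the prefetch unit, telling the hardware how far the ring has
       * been refilled.
       */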
 3559 static __inline void
 3560 msk_rxput(struct msk_if_softc *sc_if)
 3561 {
 3562         struct msk_softc *sc;
 3563 
 3564         sc = sc_if->msk_softc;
 3565         if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
 3566                 bus_dmamap_sync(
 3567                     sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 3568                     sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 3569                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3570         else
 3571                 bus_dmamap_sync(
 3572                     sc_if->msk_cdata.msk_rx_ring_tag,
 3573                     sc_if->msk_cdata.msk_rx_ring_map,
 3574                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3575         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
 3576             PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
 3577 }
 3578 
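      /*
       * Drain the status LE ring written by the chip.  Each LE carries an
       * opcode (Rx status, Rx checksum, VLAN tag or Tx index) that is
       * demultiplexed to the owning port.  Returns non-zero if the
       * hardware put index moved again while we were processing, i.e.
       * more events are already pending.
       */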
 3579 static int
 3580 msk_handle_events(struct msk_softc *sc)
 3581 {
 3582         struct msk_if_softc *sc_if;
 3583         int rxput[2];
 3584         struct msk_stat_desc *sd;
 3585         uint32_t control, status;
 3586         int cons, len, port, rxprog;
 3587 
 3588         if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
 3589                 return (0);
 3590 
 3591         /* Sync status LEs. */
 3592         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 3593             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3594 
 3595         rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
 3596         rxprog = 0;
 3597         cons = sc->msk_stat_cons;
 3598         for (;;) {
 3599                 sd = &sc->msk_stat_ring[cons];
 3600                 control = le32toh(sd->msk_control);
 3601                 if ((control & HW_OWNER) == 0)
 3602                         break;
 3603                 control &= ~HW_OWNER;
 3604                 sd->msk_control = htole32(control);
 3605                 status = le32toh(sd->msk_status);
 3606                 len = control & STLE_LEN_MASK;
 3607                 port = (control >> 16) & 0x01;
 3608                 sc_if = sc->msk_if[port];
 3609                 if (sc_if == NULL) {
 3610                         device_printf(sc->msk_dev, "invalid port opcode "
 3611                             "0x%08x\n", control & STLE_OP_MASK);
 3612                         continue;
 3613                 }
 3614 
 3615                 switch (control & STLE_OP_MASK) {
 3616                 case OP_RXVLAN:
 3617                         sc_if->msk_vtag = ntohs(len);
 3618                         break;
 3619                 case OP_RXCHKSVLAN:
 3620                         sc_if->msk_vtag = ntohs(len);
 3621                         /* FALLTHROUGH */
 3622                 case OP_RXCHKS:
 3623                         sc_if->msk_csum = status;
 3624                         break;
 3625                 case OP_RXSTAT:
 3626                         if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
 3627                                 break;
 3628                         if (sc_if->msk_framesize >
 3629                             (MCLBYTES - MSK_RX_BUF_ALIGN))
 3630                                 msk_jumbo_rxeof(sc_if, status, control, len);
 3631                         else
 3632                                 msk_rxeof(sc_if, status, control, len);
 3633                         rxprog++;
 3634                         /*
 3635                          * Because there is no way to sync a single Rx LE,
 3636                          * defer the DMA sync operation until the end of
 3637                          * event processing.
 3638                          */
 3639                         rxput[port]++;
 3640                         /* Update the prefetch unit if we've passed the watermark. */
 3641                         if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
 3642                                 msk_rxput(sc_if);
 3643                                 rxput[port] = 0;
 3644                         }
 3645                         break;
 3646                 case OP_TXINDEXLE:
 3647                         if (sc->msk_if[MSK_PORT_A] != NULL)
 3648                                 msk_txeof(sc->msk_if[MSK_PORT_A],
 3649                                     status & STLE_TXA1_MSKL);
 3650                         if (sc->msk_if[MSK_PORT_B] != NULL)
 3651                                 msk_txeof(sc->msk_if[MSK_PORT_B],
 3652                                     ((status & STLE_TXA2_MSKL) >>
 3653                                     STLE_TXA2_SHIFTL) |
 3654                                     ((len & STLE_TXA2_MSKH) <<
 3655                                     STLE_TXA2_SHIFTH));
 3656                         break;
 3657                 default:
 3658                         device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
 3659                             control & STLE_OP_MASK);
 3660                         break;
 3661                 }
 3662                 MSK_INC(cons, sc->msk_stat_count);
 3663                 if (rxprog > sc->msk_process_limit)
 3664                         break;
 3665         }
 3666 
 3667         sc->msk_stat_cons = cons;
 3668         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 3669             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3670 
 3671         if (rxput[MSK_PORT_A] > 0)
 3672                 msk_rxput(sc->msk_if[MSK_PORT_A]);
 3673         if (rxput[MSK_PORT_B] > 0)
 3674                 msk_rxput(sc->msk_if[MSK_PORT_B]);
 3675 
 3676         return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
 3677 }
 3678 
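      /*
       * Main interrupt handler.  Reading B0_Y2_SP_ISRC2 masks further
       * interrupts until B0_Y2_SP_ICR is written at the end, so the
       * handler runs with the chip quiesced: dispatch PHY/GMAC/hardware
       * errors, process the status ring, then re-enable interrupts and
       * restart any stalled transmit queues.
       */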
 3679 static void
 3680 msk_intr(void *xsc)
 3681 {
 3682         struct msk_softc *sc;
 3683         struct msk_if_softc *sc_if0, *sc_if1;
 3684         struct ifnet *ifp0, *ifp1;
 3685         uint32_t status;
 3686         int domore;
 3687 
 3688         sc = xsc;
 3689         MSK_LOCK(sc);
 3690 
 3691         /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
 3692         status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
 3693         if (status == 0 || status == 0xffffffff ||
 3694             (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
 3695             (status & sc->msk_intrmask) == 0) {
 3696                 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3697                 MSK_UNLOCK(sc);
 3698                 return;
 3699         }
 3700 
 3701         sc_if0 = sc->msk_if[MSK_PORT_A];
 3702         sc_if1 = sc->msk_if[MSK_PORT_B];
 3703         ifp0 = ifp1 = NULL;
 3704         if (sc_if0 != NULL)
 3705                 ifp0 = sc_if0->msk_ifp;
 3706         if (sc_if1 != NULL)
 3707                 ifp1 = sc_if1->msk_ifp;
 3708 
 3709         if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
 3710                 msk_intr_phy(sc_if0);
 3711         if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
 3712                 msk_intr_phy(sc_if1);
 3713         if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
 3714                 msk_intr_gmac(sc_if0);
 3715         if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
 3716                 msk_intr_gmac(sc_if1);
 3717         if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
 3718                 device_printf(sc->msk_dev, "Rx descriptor error\n");
 3719                 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
 3720                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3721                 CSR_READ_4(sc, B0_IMSK);
 3722         }
 3723         if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
 3724                 device_printf(sc->msk_dev, "Tx descriptor error\n");
 3725                 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
 3726                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3727                 CSR_READ_4(sc, B0_IMSK);
 3728         }
 3729         if ((status & Y2_IS_HW_ERR) != 0)
 3730                 msk_intr_hwerr(sc);
 3731 
 3732         domore = msk_handle_events(sc);
 3733         if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
 3734                 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
 3735 
 3736         /* Reenable interrupts. */
 3737         CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3738 
 3739         if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3740             !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
 3741                 msk_start_locked(ifp0);
 3742         if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3743             !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
 3744                 msk_start_locked(ifp1);
 3745 
 3746         MSK_UNLOCK(sc);
 3747 }
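/*
 * msk_intr() leans on the Yukon-2 "special" interrupt gate: the read
 * of B0_Y2_SP_ISRC2 at the top masks further interrupts and the write
 * of 2 to B0_Y2_SP_ICR at the bottom re-arms them, so the handler body
 * runs with the line quiesced.  Reduced to a sketch (same registers as
 * above, event servicing elided):
 *
 *      status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);    // read also masks
 *      if (status == 0 || status == 0xffffffff) {  // spurious/shared
 *              CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);   // re-arm and bail
 *              return;
 *      }
 *      ... service PHY/MAC/status-BMU events ...
 *      CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);           // re-arm interrupts
 */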
 3748 
 3749 static void
 3750 msk_set_tx_stfwd(struct msk_if_softc *sc_if)
 3751 {
 3752         struct msk_softc *sc;
 3753         struct ifnet *ifp;
 3754 
 3755         ifp = sc_if->msk_ifp;
 3756         sc = sc_if->msk_softc;
 3757         if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
 3758             sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
 3759             sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
 3760                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3761                     TX_STFW_ENA);
 3762         } else {
 3763                 if (ifp->if_mtu > ETHERMTU) {
 3764                         /* Set Tx GMAC FIFO Almost Empty Threshold. */
 3765                         CSR_WRITE_4(sc,
 3766                             MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
 3767                             MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
 3768                         /* Disable Store & Forward mode for Tx. */
 3769                         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3770                             TX_STFW_DIS);
 3771                 } else {
 3772                         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3773                             TX_STFW_ENA);
 3774                 }
 3775         }
 3776 }
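/*
 * Policy implemented by msk_set_tx_stfwd(): Yukon Extreme (any
 * revision but A0) and Yukon Supreme or newer keep Tx store-and-forward
 * enabled unconditionally; older parts must fall back to cut-through
 * (TX_STFW_DIS) for jumbo MTUs, with the Tx GMAC FIFO almost-empty
 * threshold set to MSK_ECU_JUMBO_WM, apparently because their Tx FIFO
 * is too small to hold a complete jumbo frame.
 */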
 3777 
 3778 static void
 3779 msk_init(void *xsc)
 3780 {
 3781         struct msk_if_softc *sc_if = xsc;
 3782 
 3783         MSK_IF_LOCK(sc_if);
 3784         msk_init_locked(sc_if);
 3785         MSK_IF_UNLOCK(sc_if);
 3786 }
 3787 
 3788 static void
 3789 msk_init_locked(struct msk_if_softc *sc_if)
 3790 {
 3791         struct msk_softc *sc;
 3792         struct ifnet *ifp;
 3793         struct mii_data  *mii;
 3794         uint8_t *eaddr;
 3795         uint16_t gmac;
 3796         uint32_t reg;
 3797         int error;
 3798 
 3799         MSK_IF_LOCK_ASSERT(sc_if);
 3800 
 3801         ifp = sc_if->msk_ifp;
 3802         sc = sc_if->msk_softc;
 3803         mii = device_get_softc(sc_if->msk_miibus);
 3804 
 3805         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 3806                 return;
 3807 
 3808         error = 0;
 3809         /* Cancel pending I/O and free all Rx/Tx buffers. */
 3810         msk_stop(sc_if);
 3811 
 3812         if (ifp->if_mtu < ETHERMTU)
 3813                 sc_if->msk_framesize = ETHERMTU;
 3814         else
 3815                 sc_if->msk_framesize = ifp->if_mtu;
 3816         sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 3817         if (ifp->if_mtu > ETHERMTU &&
 3818             (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
 3819                 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
 3820                 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
 3821         }
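        /*
         * Worked example for the frame size computed above: with the
         * default MTU of 1500 bytes, msk_framesize becomes
         * 1500 + ETHER_HDR_LEN (14) + ETHER_VLAN_ENCAP_LEN (4) = 1518;
         * a 9000-byte jumbo MTU yields 9018.
         */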
 3822 
 3823         /* GMAC Control reset. */
 3824         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
 3825         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
 3826         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
 3827         if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 3828             sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
 3829                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
 3830                     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 3831                     GMC_BYP_RETR_ON);
 3832 
 3833         /*
 3834          * Initialize GMAC first such that speed/duplex/flow-control
 3835          * parameters are renegotiated when the interface is brought up.
 3836          */
 3837         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
 3838 
 3839         /* Dummy read the Interrupt Source Register. */
 3840         CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3841 
 3842         /* Clear MIB stats. */
 3843         msk_stats_clear(sc_if);
 3844 
 3845         /* Disable FCS. */
 3846         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
 3847 
 3848         /* Setup Transmit Control Register. */
 3849         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
 3850 
 3851         /* Setup Transmit Flow Control Register. */
 3852         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
 3853 
 3854         /* Setup Transmit Parameter Register. */
 3855         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
 3856             TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
 3857             TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
 3858 
 3859         gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
 3860             GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
 3861 
 3862         if (ifp->if_mtu > ETHERMTU)
 3863                 gmac |= GM_SMOD_JUMBO_ENA;
 3864         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
 3865 
 3866         /* Set station address. */
 3867         eaddr = IF_LLADDR(ifp);
 3868         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
 3869             eaddr[0] | (eaddr[1] << 8));
 3870         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
 3871             eaddr[2] | (eaddr[3] << 8));
 3872         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
 3873             eaddr[4] | (eaddr[5] << 8));
 3874         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
 3875             eaddr[0] | (eaddr[1] << 8));
 3876         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
 3877             eaddr[2] | (eaddr[3] << 8));
 3878         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
 3879             eaddr[4] | (eaddr[5] << 8));
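        /*
         * The station address is programmed as three little-endian
         * 16-bit words per register bank.  Worked example for
         * 00:11:22:33:44:55: GM_SRC_ADDR_1L = 0x1100,
         * GM_SRC_ADDR_1M = 0x3322, GM_SRC_ADDR_1H = 0x5544, with the
         * same values mirrored into the _2 bank.
         */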
 3880 
 3881         /* Disable interrupts for counter overflows. */
 3882         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
 3883         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
 3884         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
 3885 
 3886         /* Configure Rx MAC FIFO. */
 3887         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 3888         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
 3889         reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
 3890         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
 3891             sc->msk_hw_id == CHIP_ID_YUKON_EX)
 3892                 reg |= GMF_RX_OVER_ON;
 3893         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
 3894 
 3895         /* Set receive filter. */
 3896         msk_rxfilter(sc_if);
 3897 
 3898         if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
 3899                 /* Clear flush mask - HW bug. */
 3900                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
 3901         } else {
 3902                 /* Flush Rx MAC FIFO on any flow control or error. */
 3903                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
 3904                     GMR_FS_ANY_ERR);
 3905         }
 3906 
 3907         /*
 3908          * Set the Rx FIFO flush threshold to 64 bytes + 1 FIFO word
 3909          * to avoid a hardware hang on receipt of pause frames.
 3910          */
 3911         reg = RX_GMF_FL_THR_DEF + 1;
 3912         /* Another magic value for Yukon FE+, taken from Linux. */
 3913         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3914             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
 3915                 reg = 0x178;
 3916         CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
 3917 
 3918         /* Configure Tx MAC FIFO. */
 3919         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 3920         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
 3921         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
 3922 
 3923         /* Configure hardware VLAN tag insertion/stripping. */
 3924         msk_setvlan(sc_if, ifp);
 3925 
 3926         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
 3927                 /* Set Rx Pause threshold. */
 3928                 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
 3929                     MSK_ECU_LLPP);
 3930                 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
 3931                     MSK_ECU_ULPP);
 3932                 /* Configure store-and-forward for Tx. */
 3933                 msk_set_tx_stfwd(sc_if);
 3934         }
 3935 
 3936         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3937             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
 3938                 /* Disable dynamic watermark - from Linux. */
 3939                 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
 3940                 reg &= ~0x03;
 3941                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
 3942         }
 3943 
 3944         /*
 3945          * Disable Force Sync bit and Alloc bit in Tx RAM interface
 3946          * arbiter as we don't use Sync Tx queue.
 3947          */
 3948         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
 3949             TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
 3950         /* Enable the RAM Interface Arbiter. */
 3951         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
 3952 
 3953         /* Setup RAM buffer. */
 3954         msk_set_rambuffer(sc_if);
 3955 
 3956         /* Disable Tx sync Queue. */
 3957         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
 3958 
 3959         /* Setup Tx Queue Bus Memory Interface. */
 3960         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
 3961         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
 3962         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
 3963         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
 3964         switch (sc->msk_hw_id) {
 3965         case CHIP_ID_YUKON_EC_U:
 3966                 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
 3967                         /* Fix for Yukon-EC Ultra: set BMU FIFO level. */
 3968                         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
 3969                             MSK_ECU_TXFF_LEV);
 3970                 }
 3971                 break;
 3972         case CHIP_ID_YUKON_EX:
 3973                 /*
 3974                  * Yukon Extreme seems to have a silicon bug in its
 3975                  * automatic Tx checksum calculation capability.
 3976                  */
 3977                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
 3978                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
 3979                             F_TX_CHK_AUTO_OFF);
 3980                 break;
 3981         }
 3982 
 3983         /* Setup Rx Queue Bus Memory Interface. */
 3984         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
 3985         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
 3986         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
 3987         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
 3988         if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
 3989             sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
 3990                 /* MAC Rx RAM Read is controlled by hardware. */
 3991                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
 3992         }
 3993 
 3994         msk_set_prefetch(sc, sc_if->msk_txq,
 3995             sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
 3996         msk_init_tx_ring(sc_if);
 3997 
 3998         /* Disable RSS hash and configure Rx checksum offloading. */
 3999         reg = BMU_DIS_RX_RSS_HASH;
 4000         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 4001             (ifp->if_capenable & IFCAP_RXCSUM) != 0)
 4002                 reg |= BMU_ENA_RX_CHKSUM;
 4003         else
 4004                 reg |= BMU_DIS_RX_CHKSUM;
 4005         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);
 4006         if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
 4007                 msk_set_prefetch(sc, sc_if->msk_rxq,
 4008                     sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
 4009                     MSK_JUMBO_RX_RING_CNT - 1);
 4010                 error = msk_init_jumbo_rx_ring(sc_if);
 4011         } else {
 4012                 msk_set_prefetch(sc, sc_if->msk_rxq,
 4013                     sc_if->msk_rdata.msk_rx_ring_paddr,
 4014                     MSK_RX_RING_CNT - 1);
 4015                 error = msk_init_rx_ring(sc_if);
 4016         }
 4017         if (error != 0) {
 4018                 device_printf(sc_if->msk_if_dev,
 4019                     "initialization failed: no memory for Rx buffers\n");
 4020                 msk_stop(sc_if);
 4021                 return;
 4022         }
 4023         if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 4024             sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 4025                 /* Disable flushing of non-ASF packets. */
 4026                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 4027                     GMF_RX_MACSEC_FLUSH_OFF);
 4028         }
 4029 
 4030         /* Configure interrupt handling. */
 4031         if (sc_if->msk_port == MSK_PORT_A) {
 4032                 sc->msk_intrmask |= Y2_IS_PORT_A;
 4033                 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
 4034         } else {
 4035                 sc->msk_intrmask |= Y2_IS_PORT_B;
 4036                 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
 4037         }
 4038         /* Configure IRQ moderation mask. */
 4039         CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
 4040         if (sc->msk_int_holdoff > 0) {
 4041                 /* Configure initial IRQ moderation timer value. */
 4042                 CSR_WRITE_4(sc, B2_IRQM_INI,
 4043                     MSK_USECS(sc, sc->msk_int_holdoff));
 4044                 CSR_WRITE_4(sc, B2_IRQM_VAL,
 4045                     MSK_USECS(sc, sc->msk_int_holdoff));
 4046                 /* Start IRQ moderation. */
 4047                 CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
 4048         }
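        /*
         * The moderation timer counts in core-clock ticks, so the
         * microsecond hold-off in sc->msk_int_holdoff is scaled by
         * MSK_USECS() before being loaded into B2_IRQM_INI/B2_IRQM_VAL
         * (assuming the usual definition, a multiply by the core clock
         * frequency in MHz).
         */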
 4049         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 4050         CSR_READ_4(sc, B0_HWE_IMSK);
 4051         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 4052         CSR_READ_4(sc, B0_IMSK);
 4053 
 4054         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 4055         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 4056 
 4057         sc_if->msk_flags &= ~MSK_FLAG_LINK;
 4058         mii_mediachg(mii);
 4059 
 4060         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 4061 }
 4062 
 4063 static void
 4064 msk_set_rambuffer(struct msk_if_softc *sc_if)
 4065 {
 4066         struct msk_softc *sc;
 4067         int ltpp, utpp;
 4068 
 4069         sc = sc_if->msk_softc;
 4070         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
 4071                 return;
 4072 
 4073         /* Setup Rx Queue. */
 4074         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
 4075         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
 4076             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4077         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
 4078             sc->msk_rxqend[sc_if->msk_port] / 8);
 4079         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
 4080             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4081         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
 4082             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4083 
 4084         utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 4085             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
 4086         ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 4087             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
 4088         if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
 4089                 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
 4090         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
 4091         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
 4092         /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
 4093 
 4094         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
 4095         CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
 4096 
 4097         /* Setup Tx Queue. */
 4098         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
 4099         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
 4100             sc->msk_txqstart[sc_if->msk_port] / 8);
 4101         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
 4102             sc->msk_txqend[sc_if->msk_port] / 8);
 4103         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
 4104             sc->msk_txqstart[sc_if->msk_port] / 8);
 4105         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
 4106             sc->msk_txqstart[sc_if->msk_port] / 8);
 4107         /* Enable Store & Forward for Tx side. */
 4108         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
 4109         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
 4110         CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
 4111 }
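/*
 * Note the divisions by 8 in msk_set_rambuffer(): the RAM buffer
 * registers are programmed in 8-byte (qword) units, so the byte
 * offsets kept in msk_rxqstart/msk_rxqend/msk_txqstart/msk_txqend are
 * converted before being written.  The Rx upper/lower pause thresholds
 * are likewise byte distances (MSK_RB_ULPP, MSK_RB_LLPP_B) measured
 * back from the end of the queue and then converted to qwords.
 */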
 4112 
 4113 static void
 4114 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
 4115     uint32_t count)
 4116 {
 4117 
 4118         /* Reset the prefetch unit. */
 4119         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4120             PREF_UNIT_RST_SET);
 4121         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4122             PREF_UNIT_RST_CLR);
 4123         /* Set LE base address. */
 4124         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
 4125             MSK_ADDR_LO(addr));
 4126         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
 4127             MSK_ADDR_HI(addr));
 4128         /* Set the last index of the list. */
 4129         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
 4130             count);
 4131         /* Turn on prefetch unit. */
 4132         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4133             PREF_UNIT_OP_ON);
 4134         /* Dummy read to flush the posted write. */
 4135         CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
 4136 }
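/*
 * MSK_ADDR_LO()/MSK_ADDR_HI() split the 64-bit list-element base
 * address across the two 32-bit prefetch-unit address registers.  A
 * sketch of the usual definitions (illustrative; the real ones live in
 * the driver's register header):
 *
 *      #define MSK_ADDR_LO(x)  ((uint64_t)(x) & 0xffffffff)
 *      #define MSK_ADDR_HI(x)  ((uint64_t)(x) >> 32)
 */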
 4137 
 4138 static void
 4139 msk_stop(struct msk_if_softc *sc_if)
 4140 {
 4141         struct msk_softc *sc;
 4142         struct msk_txdesc *txd;
 4143         struct msk_rxdesc *rxd;
 4144         struct msk_rxdesc *jrxd;
 4145         struct ifnet *ifp;
 4146         uint32_t val;
 4147         int i;
 4148 
 4149         MSK_IF_LOCK_ASSERT(sc_if);
 4150         sc = sc_if->msk_softc;
 4151         ifp = sc_if->msk_ifp;
 4152 
 4153         callout_stop(&sc_if->msk_tick_ch);
 4154         sc_if->msk_watchdog_timer = 0;
 4155 
 4156         /* Disable interrupts. */
 4157         if (sc_if->msk_port == MSK_PORT_A) {
 4158                 sc->msk_intrmask &= ~Y2_IS_PORT_A;
 4159                 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
 4160         } else {
 4161                 sc->msk_intrmask &= ~Y2_IS_PORT_B;
 4162                 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
 4163         }
 4164         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 4165         CSR_READ_4(sc, B0_HWE_IMSK);
 4166         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 4167         CSR_READ_4(sc, B0_IMSK);
 4168 
 4169         /* Disable Tx/Rx MAC. */
 4170         val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 4171         val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
 4172         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
 4173         /* Read back to flush the write. */
 4174         GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 4175         /* Update stats and clear counters. */
 4176         msk_stats_update(sc_if);
 4177 
 4178         /* Stop Tx BMU. */
 4179         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
 4180         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 4181         for (i = 0; i < MSK_TIMEOUT; i++) {
 4182                 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
 4183                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 4184                             BMU_STOP);
 4185                         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 4186                 } else
 4187                         break;
 4188                 DELAY(1);
 4189         }
 4190         if (i == MSK_TIMEOUT)
 4191                 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
 4192         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
 4193             RB_RST_SET | RB_DIS_OP_MD);
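        /*
         * The stop sequence above is the driver's standard
         * poll-until-idle pattern: issue BMU_STOP, then re-read the
         * queue CSR for up to MSK_TIMEOUT iterations with a 1 us
         * DELAY() between polls, re-issuing the stop until BMU_STOP or
         * BMU_IDLE is reported; exhausting the loop is reported but
         * treated as non-fatal.
         */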
 4194 
 4195         /* Disable all GMAC interrupts. */
 4196         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
 4197         /* Disable PHY interrupt. */
 4198         msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
 4199 
 4200         /* Disable the RAM Interface Arbiter. */
 4201         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
 4202 
 4203         /* Reset the PCI FIFO of the async Tx queue. */
 4204         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 4205             BMU_RST_SET | BMU_FIFO_RST);
 4206 
 4207         /* Reset the Tx prefetch units. */
 4208         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
 4209             PREF_UNIT_RST_SET);
 4210 
 4211         /* Reset the RAM Buffer async Tx queue. */
 4212         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
 4213 
 4214         /* Reset Tx MAC FIFO. */
 4215         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 4216         /* Set Pause Off. */
 4217         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
 4218 
 4219         /*
 4220          * The Rx Stop command will not work on Yukon-2 if the BMU has not
 4221          * reached an end-of-packet boundary, and since we cannot rule out
 4222          * incoming data, the BMU must be reset while no DMA transfer is in
 4223          * progress. Because the Rx path may still be active, the Rx RAM
 4224          * buffer is stopped first, so that any incoming data cannot trigger
 4225          * a new DMA. Once the RAM buffer is stopped, the BMU is polled until
 4226          * any DMA in progress has ended, and only then is it reset.
 4228          */
 4229 
 4230         /* Disable the RAM Buffer receive queue. */
 4231         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
 4232         for (i = 0; i < MSK_TIMEOUT; i++) {
 4233                 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
 4234                     CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
 4235                         break;
 4236                 DELAY(1);
 4237         }
 4238         if (i == MSK_TIMEOUT)
 4239                 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
 4240         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
 4241             BMU_RST_SET | BMU_FIFO_RST);
 4242         /* Reset the Rx prefetch unit. */
 4243         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
 4244             PREF_UNIT_RST_SET);
 4245         /* Reset the RAM Buffer receive queue. */
 4246         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
 4247         /* Reset Rx MAC FIFO. */
 4248         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 4249 
 4250         /* Free Rx and Tx mbufs still in the queues. */
 4251         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 4252                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 4253                 if (rxd->rx_m != NULL) {
 4254                         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
 4255                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 4256                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
 4257                             rxd->rx_dmamap);
 4258                         m_freem(rxd->rx_m);
 4259                         rxd->rx_m = NULL;
 4260                 }
 4261         }
 4262         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 4263                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 4264                 if (jrxd->rx_m != NULL) {
 4265                         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
 4266                             jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 4267                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
 4268                             jrxd->rx_dmamap);
 4269                         m_freem(jrxd->rx_m);
 4270                         jrxd->rx_m = NULL;
 4271                 }
 4272         }
 4273         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 4274                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 4275                 if (txd->tx_m != NULL) {
 4276                         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
 4277                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 4278                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
 4279                             txd->tx_dmamap);
 4280                         m_freem(txd->tx_m);
 4281                         txd->tx_m = NULL;
 4282                 }
 4283         }
 4284 
 4285         /*
 4286          * Mark the interface down.
 4287          */
 4288         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 4289         sc_if->msk_flags &= ~MSK_FLAG_LINK;
 4290 }
 4291 
 4292 /*
 4293  * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the
 4294  * lower counter clears the high 16 bits of the counter, so accessing
 4295  * the lower 16 bits must be the last operation.
 4296  */
 4297 #define MSK_READ_MIB32(x, y)                                    \
 4298         (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +       \
 4299         (uint32_t)GMAC_READ_2(sc, x, y)
 4300 #define MSK_READ_MIB64(x, y)                                    \
 4301         (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +        \
 4302         (uint64_t)MSK_READ_MIB32(x, y)
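/*
 * Expansion example: MSK_READ_MIB32(port, GM_RXF_UC_OK) issues two
 * 16-bit GMAC reads, the high word from offset GM_RXF_UC_OK + 4 and
 * the low word from GM_RXF_UC_OK itself, combining them as
 * (hi << 16) + lo; MSK_READ_MIB64() stacks two such 32-bit reads the
 * same way.  Per the comment above, the low word must be the last
 * access while GM_PAR_MIB_CLR is set.
 */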
 4303 
 4304 static void
 4305 msk_stats_clear(struct msk_if_softc *sc_if)
 4306 {
 4307         struct msk_softc *sc;
 4308         uint32_t reg;
 4309         uint16_t gmac;
 4310         int i;
 4311 
 4312         MSK_IF_LOCK_ASSERT(sc_if);
 4313 
 4314         sc = sc_if->msk_softc;
 4315         /* Set MIB Clear Counter Mode. */
 4316         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 4317         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 4318         /* Read all MIB Counters with Clear Mode set. */
 4319         for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
 4320                 reg = MSK_READ_MIB32(sc_if->msk_port, i);
 4321         /* Clear MIB Clear Counter Mode. */
 4322         gmac &= ~GM_PAR_MIB_CLR;
 4323         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 4324 }
 4325 
 4326 static void
 4327 msk_stats_update(struct msk_if_softc *sc_if)
 4328 {
 4329         struct msk_softc *sc;
 4330         struct ifnet *ifp;
 4331         struct msk_hw_stats *stats;
 4332         uint16_t gmac;
 4333         uint32_t reg;
 4334 
 4335         MSK_IF_LOCK_ASSERT(sc_if);
 4336 
 4337         ifp = sc_if->msk_ifp;
 4338         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 4339                 return;
 4340         sc = sc_if->msk_softc;
 4341         stats = &sc_if->msk_stats;
 4342         /* Set MIB Clear Counter Mode. */
 4343         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 4344         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 4345 
 4346         /* Rx stats. */
 4347         stats->rx_ucast_frames +=
 4348             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
 4349         stats->rx_bcast_frames +=
 4350             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
 4351         stats->rx_pause_frames +=
 4352             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
 4353         stats->rx_mcast_frames +=
 4354             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
 4355         stats->rx_crc_errs +=
 4356             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
 4357         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
 4358         stats->rx_good_octets +=
 4359             MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
 4360         stats->rx_bad_octets +=
 4361             MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
 4362         stats->rx_runts +=
 4363             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
 4364         stats->rx_runt_errs +=
 4365             MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
 4366         stats->rx_pkts_64 +=
 4367             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
 4368         stats->rx_pkts_65_127 +=
 4369             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
 4370         stats->rx_pkts_128_255 +=
 4371             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
 4372         stats->rx_pkts_256_511 +=
 4373             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
 4374         stats->rx_pkts_512_1023 +=
 4375             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
 4376         stats->rx_pkts_1024_1518 +=
 4377             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
 4378         stats->rx_pkts_1519_max +=
 4379             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
 4380         stats->rx_pkts_too_long +=
 4381             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
 4382         stats->rx_pkts_jabbers +=
 4383             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
 4384         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
 4385         stats->rx_fifo_oflows +=
 4386             MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
 4387         reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);
 4388 
 4389         /* Tx stats. */
 4390         stats->tx_ucast_frames +=
 4391             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
 4392         stats->tx_bcast_frames +=
 4393             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
 4394         stats->tx_pause_frames +=
 4395             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
 4396         stats->tx_mcast_frames +=
 4397             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
 4398         stats->tx_octets +=
 4399             MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
 4400         stats->tx_pkts_64 +=
 4401             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
 4402         stats->tx_pkts_65_127 +=
 4403             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
 4404         stats->tx_pkts_128_255 +=
 4405             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
 4406         stats->tx_pkts_256_511 +=
 4407             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
 4408         stats->tx_pkts_512_1023 +=
 4409             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
 4410         stats->tx_pkts_1024_1518 +=
 4411             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
 4412         stats->tx_pkts_1519_max +=
 4413             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
 4414         reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
 4415         stats->tx_colls +=
 4416             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
 4417         stats->tx_late_colls +=
 4418             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
 4419         stats->tx_excess_colls +=
 4420             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
 4421         stats->tx_multi_colls +=
 4422             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
 4423         stats->tx_single_colls +=
 4424             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
 4425         stats->tx_underflows +=
 4426             MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
 4427         /* Clear MIB Clear Counter Mode. */
 4428         gmac &= ~GM_PAR_MIB_CLR;
 4429         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 4430 }
 4431 
 4432 static int
 4433 msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
 4434 {
 4435         struct msk_softc *sc;
 4436         struct msk_if_softc *sc_if;
 4437         uint32_t result, *stat;
 4438         int off;
 4439 
 4440         sc_if = (struct msk_if_softc *)arg1;
 4441         sc = sc_if->msk_softc;
 4442         off = arg2;
 4443         stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
 4444 
 4445         MSK_IF_LOCK(sc_if);
 4446         result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
 4447         result += *stat;
 4448         MSK_IF_UNLOCK(sc_if);
 4449 
 4450         return (sysctl_handle_int(oidp, &result, 0, req));
 4451 }
 4452 
 4453 static int
 4454 msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
 4455 {
 4456         struct msk_softc *sc;
 4457         struct msk_if_softc *sc_if;
 4458         uint64_t result, *stat;
 4459         int off;
 4460 
 4461         sc_if = (struct msk_if_softc *)arg1;
 4462         sc = sc_if->msk_softc;
 4463         off = arg2;
 4464         stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
 4465 
 4466         MSK_IF_LOCK(sc_if);
 4467         result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
 4468         result += *stat;
 4469         MSK_IF_UNLOCK(sc_if);
 4470 
 4471         return (sysctl_handle_64(oidp, &result, 0, req));
 4472 }
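/*
 * Both handlers above share one trick: arg2 carries the byte offset of
 * the counter inside struct msk_hw_stats (filled in with offsetof() by
 * the MSK_SYSCTL_STAT* macros below), so a single handler serves every
 * counter.  The reported value is the accumulated soft count plus the
 * live MIB register; since each 4-byte counter in the struct maps to
 * 8 bytes of GMAC register space (16-bit words at y and y + 4), the
 * register offset works out to GM_MIB_CNT_BASE + off * 2.
 */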
 4473 
 4474 #undef MSK_READ_MIB32
 4475 #undef MSK_READ_MIB64
 4476 
 4477 #define MSK_SYSCTL_STAT32(sc, c, o, p, n, d)                            \
 4478         SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,   \
 4479             sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,    \
 4480             "IU", d)
 4481 #define MSK_SYSCTL_STAT64(sc, c, o, p, n, d)                            \
 4482         SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_U64 | CTLFLAG_RD,    \
 4483             sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,    \
 4484             "QU", d)
 4485 
 4486 static void
 4487 msk_sysctl_node(struct msk_if_softc *sc_if)
 4488 {
 4489         struct sysctl_ctx_list *ctx;
 4490         struct sysctl_oid_list *child, *schild;
 4491         struct sysctl_oid *tree;
 4492 
 4493         ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
 4494         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
 4495 
 4496         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
 4497             NULL, "MSK Statistics");
 4498         schild = SYSCTL_CHILDREN(tree);
 4499         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
 4500             NULL, "MSK RX Statistics");
 4501         child = SYSCTL_CHILDREN(tree);
 4502         MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
 4503             child, rx_ucast_frames, "Good unicast frames");
 4504         MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
 4505             child, rx_bcast_frames, "Good broadcast frames");
 4506         MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
 4507             child, rx_pause_frames, "Pause frames");
 4508         MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
 4509             child, rx_mcast_frames, "Multicast frames");
 4510         MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
 4511             child, rx_crc_errs, "CRC errors");
 4512         MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
 4513             child, rx_good_octets, "Good octets");
 4514         MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
 4515             child, rx_bad_octets, "Bad octets");
 4516         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
 4517             child, rx_pkts_64, "64 bytes frames");
 4518         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
 4519             child, rx_pkts_65_127, "65 to 127 bytes frames");
 4520         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
 4521             child, rx_pkts_128_255, "128 to 255 bytes frames");
 4522         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
 4523             child, rx_pkts_256_511, "256 to 511 bytes frames");
 4524         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
 4525             child, rx_pkts_512_1023, "512 to 1023 bytes frames");
 4526         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
 4527             child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
 4528         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
 4529             child, rx_pkts_1519_max, "1519 to max frames");
 4530         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
 4531             child, rx_pkts_too_long, "Frames too long");
 4532         MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
 4533             child, rx_pkts_jabbers, "Jabber errors");
 4534         MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
 4535             child, rx_fifo_oflows, "FIFO overflows");
 4536 
 4537         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
 4538             NULL, "MSK TX Statistics");
 4539         child = SYSCTL_CHILDREN(tree);
 4540         MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
 4541             child, tx_ucast_frames, "Unicast frames");
 4542         MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
 4543             child, tx_bcast_frames, "Broadcast frames");
 4544         MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
 4545             child, tx_pause_frames, "Pause frames");
 4546         MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
 4547             child, tx_mcast_frames, "Multicast frames");
 4548         MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
 4549             child, tx_octets, "Octets");
 4550         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
 4551             child, tx_pkts_64, "64 bytes frames");
 4552         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
 4553             child, tx_pkts_65_127, "65 to 127 bytes frames");
 4554         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
 4555             child, tx_pkts_128_255, "128 to 255 bytes frames");
 4556         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
 4557             child, tx_pkts_256_511, "256 to 511 bytes frames");
 4558         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
 4559             child, tx_pkts_512_1023, "512 to 1023 bytes frames");
 4560         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
 4561             child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
 4562         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
 4563             child, tx_pkts_1519_max, "1519 to max frames");
 4564         MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
 4565             child, tx_colls, "Collisions");
 4566         MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
 4567             child, tx_late_colls, "Late collisions");
 4568         MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
 4569             child, tx_excess_colls, "Excessive collisions");
 4570         MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
 4571             child, tx_multi_colls, "Multiple collisions");
 4572         MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
 4573             child, tx_single_colls, "Single collisions");
 4574         MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
 4575             child, tx_underflows, "FIFO underflows");
 4576 }
 4577 
 4578 #undef MSK_SYSCTL_STAT32
 4579 #undef MSK_SYSCTL_STAT64
 4580 
 4581 static int
 4582 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 4583 {
 4584         int error, value;
 4585 
 4586         if (!arg1)
 4587                 return (EINVAL);
 4588         value = *(int *)arg1;
 4589         error = sysctl_handle_int(oidp, &value, 0, req);
 4590         if (error || !req->newptr)
 4591                 return (error);
 4592         if (value < low || value > high)
 4593                 return (EINVAL);
 4594         *(int *)arg1 = value;
 4595 
 4596         return (0);
 4597 }
 4598 
 4599 static int
 4600 sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
 4601 {
 4602 
 4603         return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
 4604             MSK_PROC_MAX));
 4605 }
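/*
 * A handler such as sysctl_hw_msk_proc_limit() is hooked up with
 * SYSCTL_ADD_PROC(); a hedged sketch of such a registration (the
 * actual call lives elsewhere in this file, and the description string
 * below is illustrative):
 *
 *      SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->msk_dev),
 *          SYSCTL_CHILDREN(device_get_sysctl_tree(sc->msk_dev)),
 *          OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 *          &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit,
 *          "I", "max number of Rx events to process");
 */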
