FreeBSD/Linux Kernel Cross Reference
sys/dev/msk/if_msk.c


    1 /******************************************************************************
    2  *
    3  * Name   : sky2.c
    4  * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
    5  * Version: $Revision: 1.23 $
    6  * Date   : $Date: 2005/12/22 09:04:11 $
    7  * Purpose: Main driver source file
    8  *
    9  *****************************************************************************/
   10 
   11 /******************************************************************************
   12  *
   13  *      LICENSE:
   14  *      Copyright (C) Marvell International Ltd. and/or its affiliates
   15  *
   16  *      The computer program files contained in this folder ("Files")
   17  *      are provided to you under the BSD-type license terms provided
   18  *      below, and any use of such Files and any derivative works
   19  *      thereof created by you shall be governed by the following terms
   20  *      and conditions:
   21  *
   22  *      - Redistributions of source code must retain the above copyright
   23  *        notice, this list of conditions and the following disclaimer.
   24  *      - Redistributions in binary form must reproduce the above
   25  *        copyright notice, this list of conditions and the following
   26  *        disclaimer in the documentation and/or other materials provided
   27  *        with the distribution.
   28  *      - Neither the name of Marvell nor the names of its contributors
   29  *        may be used to endorse or promote products derived from this
   30  *        software without specific prior written permission.
   31  *
   32  *      THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   33  *      "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   34  *      LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
   35  *      FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   36  *      COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   37  *      INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
   38  *      BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
   39  *      LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   40  *      HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
   41  *      STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   42  *      ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
   43  *      OF THE POSSIBILITY OF SUCH DAMAGE.
   44  *      /LICENSE
   45  *
   46  *****************************************************************************/
   47 
   48 /*-
   49  * SPDX-License-Identifier: BSD-4-Clause AND BSD-3-Clause
   50  *
   51  * Copyright (c) 1997, 1998, 1999, 2000
   52  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
   53  *
   54  * Redistribution and use in source and binary forms, with or without
   55  * modification, are permitted provided that the following conditions
   56  * are met:
   57  * 1. Redistributions of source code must retain the above copyright
   58  *    notice, this list of conditions and the following disclaimer.
   59  * 2. Redistributions in binary form must reproduce the above copyright
   60  *    notice, this list of conditions and the following disclaimer in the
   61  *    documentation and/or other materials provided with the distribution.
   62  * 3. All advertising materials mentioning features or use of this software
   63  *    must display the following acknowledgement:
   64  *      This product includes software developed by Bill Paul.
   65  * 4. Neither the name of the author nor the names of any co-contributors
   66  *    may be used to endorse or promote products derived from this software
   67  *    without specific prior written permission.
   68  *
   69  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   70  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   71  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   72  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   73  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   74  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   75  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   76  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   77  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   78  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   79  * THE POSSIBILITY OF SUCH DAMAGE.
   80  */
   81 /*-
   82  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
   83  *
   84  * Permission to use, copy, modify, and distribute this software for any
   85  * purpose with or without fee is hereby granted, provided that the above
   86  * copyright notice and this permission notice appear in all copies.
   87  *
   88  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   89  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   90  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   91  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   92  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   93  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   94  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
   95  */
   96 
   97 /*
   98  * Device driver for the Marvell Yukon II Ethernet controller.
   99  * Due to lack of documentation, this driver is based on the code from
  100  * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
  101  */
  102 
  103 #include <sys/cdefs.h>
  104 __FBSDID("$FreeBSD$");
  105 
  106 #include <sys/param.h>
  107 #include <sys/systm.h>
  108 #include <sys/bus.h>
  109 #include <sys/endian.h>
  110 #include <sys/mbuf.h>
  111 #include <sys/malloc.h>
  112 #include <sys/kernel.h>
  113 #include <sys/module.h>
  114 #include <sys/socket.h>
  115 #include <sys/sockio.h>
  116 #include <sys/queue.h>
  117 #include <sys/sysctl.h>
  118 
  119 #include <net/bpf.h>
  120 #include <net/ethernet.h>
  121 #include <net/if.h>
  122 #include <net/if_var.h>
  123 #include <net/if_arp.h>
  124 #include <net/if_dl.h>
  125 #include <net/if_media.h>
  126 #include <net/if_types.h>
  127 #include <net/if_vlan_var.h>
  128 
  129 #include <netinet/in.h>
  130 #include <netinet/in_systm.h>
  131 #include <netinet/ip.h>
  132 #include <netinet/tcp.h>
  133 #include <netinet/udp.h>
  134 
  135 #include <machine/bus.h>
  136 #include <machine/in_cksum.h>
  137 #include <machine/resource.h>
  138 #include <sys/rman.h>
  139 
  140 #include <dev/mii/mii.h>
  141 #include <dev/mii/miivar.h>
  142 
  143 #include <dev/pci/pcireg.h>
  144 #include <dev/pci/pcivar.h>
  145 
  146 #include <dev/msk/if_mskreg.h>
  147 
  148 MODULE_DEPEND(msk, pci, 1, 1, 1);
  149 MODULE_DEPEND(msk, ether, 1, 1, 1);
  150 MODULE_DEPEND(msk, miibus, 1, 1, 1);
  151 
  152 /* "device miibus" required.  See GENERIC if you get errors here. */
  153 #include "miibus_if.h"
  154 
  155 /* Tunables. */
  156 static int msi_disable = 0;
  157 TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
  158 static int legacy_intr = 0;
  159 TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
  160 static int jumbo_disable = 0;
  161 TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
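       /*
        * These are kernel environment tunables; they can be set at boot
        * time, e.g. hw.msk.msi_disable="1" in /boot/loader.conf.
        */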
  162 
  163 #define MSK_CSUM_FEATURES       (CSUM_TCP | CSUM_UDP)
  164 
  165 /*
  166  * Devices supported by this driver.
  167  */
  168 static const struct msk_product {
  169         uint16_t        msk_vendorid;
  170         uint16_t        msk_deviceid;
  171         const char      *msk_name;
  172 } msk_products[] = {
  173         { VENDORID_SK, DEVICEID_SK_YUKON2,
  174             "SK-9Sxx Gigabit Ethernet" },
  175         { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
  176             "SK-9Exx Gigabit Ethernet"},
  177         { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
  178             "Marvell Yukon 88E8021CU Gigabit Ethernet" },
  179         { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
  180             "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
  181         { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
  182             "Marvell Yukon 88E8022CU Gigabit Ethernet" },
  183         { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
  184             "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
  185         { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
  186             "Marvell Yukon 88E8061CU Gigabit Ethernet" },
  187         { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
  188             "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
  189         { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
  190             "Marvell Yukon 88E8062CU Gigabit Ethernet" },
  191         { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
  192             "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
  193         { VENDORID_MARVELL, DEVICEID_MRVL_8035,
  194             "Marvell Yukon 88E8035 Fast Ethernet" },
  195         { VENDORID_MARVELL, DEVICEID_MRVL_8036,
  196             "Marvell Yukon 88E8036 Fast Ethernet" },
  197         { VENDORID_MARVELL, DEVICEID_MRVL_8038,
  198             "Marvell Yukon 88E8038 Fast Ethernet" },
  199         { VENDORID_MARVELL, DEVICEID_MRVL_8039,
  200             "Marvell Yukon 88E8039 Fast Ethernet" },
  201         { VENDORID_MARVELL, DEVICEID_MRVL_8040,
  202             "Marvell Yukon 88E8040 Fast Ethernet" },
  203         { VENDORID_MARVELL, DEVICEID_MRVL_8040T,
  204             "Marvell Yukon 88E8040T Fast Ethernet" },
  205         { VENDORID_MARVELL, DEVICEID_MRVL_8042,
  206             "Marvell Yukon 88E8042 Fast Ethernet" },
  207         { VENDORID_MARVELL, DEVICEID_MRVL_8048,
  208             "Marvell Yukon 88E8048 Fast Ethernet" },
  209         { VENDORID_MARVELL, DEVICEID_MRVL_4361,
  210             "Marvell Yukon 88E8050 Gigabit Ethernet" },
  211         { VENDORID_MARVELL, DEVICEID_MRVL_4360,
  212             "Marvell Yukon 88E8052 Gigabit Ethernet" },
  213         { VENDORID_MARVELL, DEVICEID_MRVL_4362,
  214             "Marvell Yukon 88E8053 Gigabit Ethernet" },
  215         { VENDORID_MARVELL, DEVICEID_MRVL_4363,
  216             "Marvell Yukon 88E8055 Gigabit Ethernet" },
  217         { VENDORID_MARVELL, DEVICEID_MRVL_4364,
  218             "Marvell Yukon 88E8056 Gigabit Ethernet" },
  219         { VENDORID_MARVELL, DEVICEID_MRVL_4365,
  220             "Marvell Yukon 88E8070 Gigabit Ethernet" },
  221         { VENDORID_MARVELL, DEVICEID_MRVL_436A,
  222             "Marvell Yukon 88E8058 Gigabit Ethernet" },
  223         { VENDORID_MARVELL, DEVICEID_MRVL_436B,
  224             "Marvell Yukon 88E8071 Gigabit Ethernet" },
  225         { VENDORID_MARVELL, DEVICEID_MRVL_436C,
  226             "Marvell Yukon 88E8072 Gigabit Ethernet" },
  227         { VENDORID_MARVELL, DEVICEID_MRVL_436D,
  228             "Marvell Yukon 88E8055 Gigabit Ethernet" },
  229         { VENDORID_MARVELL, DEVICEID_MRVL_4370,
  230             "Marvell Yukon 88E8075 Gigabit Ethernet" },
  231         { VENDORID_MARVELL, DEVICEID_MRVL_4380,
  232             "Marvell Yukon 88E8057 Gigabit Ethernet" },
  233         { VENDORID_MARVELL, DEVICEID_MRVL_4381,
  234             "Marvell Yukon 88E8059 Gigabit Ethernet" },
  235         { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
  236             "D-Link 550SX Gigabit Ethernet" },
  237         { VENDORID_DLINK, DEVICEID_DLINK_DGE560SX,
  238             "D-Link 560SX Gigabit Ethernet" },
  239         { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
  240             "D-Link 560T Gigabit Ethernet" }
  241 };
  242 
  243 static const char *model_name[] = {
  244         "Yukon XL",
  245         "Yukon EC Ultra",
  246         "Yukon EX",
  247         "Yukon EC",
  248         "Yukon FE",
  249         "Yukon FE+",
  250         "Yukon Supreme",
  251         "Yukon Ultra 2",
  252         "Yukon Unknown",
  253         "Yukon Optima",
  254 };
  255 
  256 static int mskc_probe(device_t);
  257 static int mskc_attach(device_t);
  258 static int mskc_detach(device_t);
  259 static int mskc_shutdown(device_t);
  260 static int mskc_setup_rambuffer(struct msk_softc *);
  261 static int mskc_suspend(device_t);
  262 static int mskc_resume(device_t);
  263 static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t);
  264 static void mskc_reset(struct msk_softc *);
  265 
  266 static int msk_probe(device_t);
  267 static int msk_attach(device_t);
  268 static int msk_detach(device_t);
  269 
  270 static void msk_tick(void *);
  271 static void msk_intr(void *);
  272 static void msk_intr_phy(struct msk_if_softc *);
  273 static void msk_intr_gmac(struct msk_if_softc *);
  274 static __inline void msk_rxput(struct msk_if_softc *);
  275 static int msk_handle_events(struct msk_softc *);
  276 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
  277 static void msk_intr_hwerr(struct msk_softc *);
  278 #ifndef __NO_STRICT_ALIGNMENT
  279 static __inline void msk_fixup_rx(struct mbuf *);
  280 #endif
  281 static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
  282 static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
  283 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
  284 static void msk_txeof(struct msk_if_softc *, int);
  285 static int msk_encap(struct msk_if_softc *, struct mbuf **);
  286 static void msk_start(struct ifnet *);
  287 static void msk_start_locked(struct ifnet *);
  288 static int msk_ioctl(struct ifnet *, u_long, caddr_t);
  289 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
  290 static void msk_set_rambuffer(struct msk_if_softc *);
  291 static void msk_set_tx_stfwd(struct msk_if_softc *);
  292 static void msk_init(void *);
  293 static void msk_init_locked(struct msk_if_softc *);
  294 static void msk_stop(struct msk_if_softc *);
  295 static void msk_watchdog(struct msk_if_softc *);
  296 static int msk_mediachange(struct ifnet *);
  297 static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
  298 static void msk_phy_power(struct msk_softc *, int);
  299 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
  300 static int msk_status_dma_alloc(struct msk_softc *);
  301 static void msk_status_dma_free(struct msk_softc *);
  302 static int msk_txrx_dma_alloc(struct msk_if_softc *);
  303 static int msk_rx_dma_jalloc(struct msk_if_softc *);
  304 static void msk_txrx_dma_free(struct msk_if_softc *);
  305 static void msk_rx_dma_jfree(struct msk_if_softc *);
  306 static int msk_rx_fill(struct msk_if_softc *, int);
  307 static int msk_init_rx_ring(struct msk_if_softc *);
  308 static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
  309 static void msk_init_tx_ring(struct msk_if_softc *);
  310 static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
  311 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
  312 static int msk_newbuf(struct msk_if_softc *, int);
  313 static int msk_jumbo_newbuf(struct msk_if_softc *, int);
  314 
  315 static int msk_phy_readreg(struct msk_if_softc *, int, int);
  316 static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
  317 static int msk_miibus_readreg(device_t, int, int);
  318 static int msk_miibus_writereg(device_t, int, int, int);
  319 static void msk_miibus_statchg(device_t);
  320 
  321 static void msk_rxfilter(struct msk_if_softc *);
  322 static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
  323 
  324 static void msk_stats_clear(struct msk_if_softc *);
  325 static void msk_stats_update(struct msk_if_softc *);
  326 static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS);
  327 static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS);
  328 static void msk_sysctl_node(struct msk_if_softc *);
  329 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
  330 static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);
  331 
  332 static device_method_t mskc_methods[] = {
  333         /* Device interface */
  334         DEVMETHOD(device_probe,         mskc_probe),
  335         DEVMETHOD(device_attach,        mskc_attach),
  336         DEVMETHOD(device_detach,        mskc_detach),
  337         DEVMETHOD(device_suspend,       mskc_suspend),
  338         DEVMETHOD(device_resume,        mskc_resume),
  339         DEVMETHOD(device_shutdown,      mskc_shutdown),
  340 
  341         DEVMETHOD(bus_get_dma_tag,      mskc_get_dma_tag),
  342 
  343         DEVMETHOD_END
  344 };
  345 
  346 static driver_t mskc_driver = {
  347         "mskc",
  348         mskc_methods,
  349         sizeof(struct msk_softc)
  350 };
  351 
  352 static devclass_t mskc_devclass;
  353 
  354 static device_method_t msk_methods[] = {
  355         /* Device interface */
  356         DEVMETHOD(device_probe,         msk_probe),
  357         DEVMETHOD(device_attach,        msk_attach),
  358         DEVMETHOD(device_detach,        msk_detach),
  359         DEVMETHOD(device_shutdown,      bus_generic_shutdown),
  360 
  361         /* MII interface */
  362         DEVMETHOD(miibus_readreg,       msk_miibus_readreg),
  363         DEVMETHOD(miibus_writereg,      msk_miibus_writereg),
  364         DEVMETHOD(miibus_statchg,       msk_miibus_statchg),
  365 
  366         DEVMETHOD_END
  367 };
  368 
  369 static driver_t msk_driver = {
  370         "msk",
  371         msk_methods,
  372         sizeof(struct msk_if_softc)
  373 };
  374 
  375 static devclass_t msk_devclass;
  376 
  377 DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, NULL, NULL);
  378 DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, NULL, NULL);
  379 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);
  380 
  381 static struct resource_spec msk_res_spec_io[] = {
  382         { SYS_RES_IOPORT,       PCIR_BAR(1),    RF_ACTIVE },
  383         { -1,                   0,              0 }
  384 };
  385 
  386 static struct resource_spec msk_res_spec_mem[] = {
  387         { SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
  388         { -1,                   0,              0 }
  389 };
  390 
  391 static struct resource_spec msk_irq_spec_legacy[] = {
  392         { SYS_RES_IRQ,          0,              RF_ACTIVE | RF_SHAREABLE },
  393         { -1,                   0,              0 }
  394 };
  395 
  396 static struct resource_spec msk_irq_spec_msi[] = {
  397         { SYS_RES_IRQ,          1,              RF_ACTIVE },
  398         { -1,                   0,              0 }
  399 };
  400 
  401 static int
  402 msk_miibus_readreg(device_t dev, int phy, int reg)
  403 {
  404         struct msk_if_softc *sc_if;
  405 
  406         sc_if = device_get_softc(dev);
  407 
  408         return (msk_phy_readreg(sc_if, phy, reg));
  409 }
  410 
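       /*
        * Read a PHY register through the GMAC Serial Management Interface:
        * issue the read command, then poll GM_SMI_CTRL until the read-valid
        * bit is set and fetch the result from GM_SMI_DATA.
        */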
  411 static int
  412 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
  413 {
  414         struct msk_softc *sc;
  415         int i, val;
  416 
  417         sc = sc_if->msk_softc;
  418 
  419         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
  420             GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
  421 
  422         for (i = 0; i < MSK_TIMEOUT; i++) {
  423                 DELAY(1);
  424                 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
  425                 if ((val & GM_SMI_CT_RD_VAL) != 0) {
  426                         val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
  427                         break;
  428                 }
  429         }
  430 
  431         if (i == MSK_TIMEOUT) {
  432                 if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
  433                 val = 0;
  434         }
  435 
  436         return (val);
  437 }
  438 
  439 static int
  440 msk_miibus_writereg(device_t dev, int phy, int reg, int val)
  441 {
  442         struct msk_if_softc *sc_if;
  443 
  444         sc_if = device_get_softc(dev);
  445 
  446         return (msk_phy_writereg(sc_if, phy, reg, val));
  447 }
  448 
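       /*
        * Write a PHY register through the SMI: load GM_SMI_DATA, issue the
        * write command and poll until the busy bit clears.
        */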
  449 static int
  450 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
  451 {
  452         struct msk_softc *sc;
  453         int i;
  454 
  455         sc = sc_if->msk_softc;
  456 
  457         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
  458         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
  459             GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
  460         for (i = 0; i < MSK_TIMEOUT; i++) {
  461                 DELAY(1);
  462                 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
  463                     GM_SMI_CT_BUSY) == 0)
  464                         break;
  465         }
  466         if (i == MSK_TIMEOUT)
  467                 if_printf(sc_if->msk_ifp, "phy write timeout\n");
  468 
  469         return (0);
  470 }
  471 
  472 static void
  473 msk_miibus_statchg(device_t dev)
  474 {
  475         struct msk_softc *sc;
  476         struct msk_if_softc *sc_if;
  477         struct mii_data *mii;
  478         struct ifnet *ifp;
  479         uint32_t gmac;
  480 
  481         sc_if = device_get_softc(dev);
  482         sc = sc_if->msk_softc;
  483 
  484         MSK_IF_LOCK_ASSERT(sc_if);
  485 
  486         mii = device_get_softc(sc_if->msk_miibus);
  487         ifp = sc_if->msk_ifp;
  488         if (mii == NULL || ifp == NULL ||
  489             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
  490                 return;
  491 
  492         sc_if->msk_flags &= ~MSK_FLAG_LINK;
  493         if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
  494             (IFM_AVALID | IFM_ACTIVE)) {
  495                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
  496                 case IFM_10_T:
  497                 case IFM_100_TX:
  498                         sc_if->msk_flags |= MSK_FLAG_LINK;
  499                         break;
  500                 case IFM_1000_T:
  501                 case IFM_1000_SX:
  502                 case IFM_1000_LX:
  503                 case IFM_1000_CX:
  504                         if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
  505                                 sc_if->msk_flags |= MSK_FLAG_LINK;
  506                         break;
  507                 default:
  508                         break;
  509                 }
  510         }
  511 
  512         if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
  513                 /* Enable Tx FIFO Underrun. */
  514                 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
  515                     GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
  516                 /*
   517                  * Because mii(4) notifies msk(4) of link state
   518                  * changes, there is no need to enable automatic
   519                  * speed/flow-control/duplex updates.
  520                  */
  521                 gmac = GM_GPCR_AU_ALL_DIS;
  522                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
  523                 case IFM_1000_SX:
  524                 case IFM_1000_T:
  525                         gmac |= GM_GPCR_SPEED_1000;
  526                         break;
  527                 case IFM_100_TX:
  528                         gmac |= GM_GPCR_SPEED_100;
  529                         break;
  530                 case IFM_10_T:
  531                         break;
  532                 }
  533 
  534                 if ((IFM_OPTIONS(mii->mii_media_active) &
  535                     IFM_ETH_RXPAUSE) == 0)
  536                         gmac |= GM_GPCR_FC_RX_DIS;
  537                 if ((IFM_OPTIONS(mii->mii_media_active) &
  538                      IFM_ETH_TXPAUSE) == 0)
  539                         gmac |= GM_GPCR_FC_TX_DIS;
  540                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
  541                         gmac |= GM_GPCR_DUP_FULL;
  542                 else
  543                         gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
  544                 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
  545                 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
   546                 /* Read back to ensure the write completed. */
  547                 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
  548                 gmac = GMC_PAUSE_OFF;
  549                 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
  550                         if ((IFM_OPTIONS(mii->mii_media_active) &
  551                             IFM_ETH_RXPAUSE) != 0)
  552                                 gmac = GMC_PAUSE_ON;
  553                 }
  554                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
  555 
  556                 /* Enable PHY interrupt for FIFO underrun/overflow. */
  557                 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
  558                     PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
  559         } else {
  560                 /*
  561                  * Link state changed to down.
  562                  * Disable PHY interrupts.
  563                  */
  564                 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
  565                 /* Disable Rx/Tx MAC. */
  566                 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
  567                 if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
  568                         gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
  569                         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
   570                         /* Read back to ensure the write completed. */
  571                         GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
  572                 }
  573         }
  574 }
  575 
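       /*
        * if_foreach_llmaddr() callback: hash one link-level multicast
        * address with the big-endian Ethernet CRC and set the matching bit
        * (low 6 bits of the CRC) in the 64-bit filter table.
        */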
  576 static u_int
  577 msk_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
  578 {
  579         uint32_t *mchash = arg;
  580         uint32_t crc;
  581 
  582         crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
  583         /* Just want the 6 least significant bits. */
  584         crc &= 0x3f;
  585         /* Set the corresponding bit in the hash table. */
  586         mchash[crc >> 5] |= 1 << (crc & 0x1f);
  587 
  588         return (1);
  589 }
  590 
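       /*
        * Program the GMAC Rx filter: promiscuous, all-multicast, or a
        * 64-bit hash filter built from the interface's multicast list.
        */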
  591 static void
  592 msk_rxfilter(struct msk_if_softc *sc_if)
  593 {
  594         struct msk_softc *sc;
  595         struct ifnet *ifp;
  596         uint32_t mchash[2];
  597         uint16_t mode;
  598 
  599         sc = sc_if->msk_softc;
  600 
  601         MSK_IF_LOCK_ASSERT(sc_if);
  602 
  603         ifp = sc_if->msk_ifp;
  604 
  605         bzero(mchash, sizeof(mchash));
  606         mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
  607         if ((ifp->if_flags & IFF_PROMISC) != 0)
  608                 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
  609         else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
  610                 mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
  611                 mchash[0] = 0xffff;
  612                 mchash[1] = 0xffff;
  613         } else {
  614                 mode |= GM_RXCR_UCF_ENA;
  615                 if_foreach_llmaddr(ifp, msk_hash_maddr, mchash);
  616                 if (mchash[0] != 0 || mchash[1] != 0)
  617                         mode |= GM_RXCR_MCF_ENA;
  618         }
  619 
  620         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
  621             mchash[0] & 0xffff);
  622         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
  623             (mchash[0] >> 16) & 0xffff);
  624         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
  625             mchash[1] & 0xffff);
  626         GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
  627             (mchash[1] >> 16) & 0xffff);
  628         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
  629 }
  630 
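       /*
        * Enable or disable hardware VLAN tag stripping (Rx) and tag
        * insertion (Tx) to match the interface capabilities.
        */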
  631 static void
  632 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
  633 {
  634         struct msk_softc *sc;
  635 
  636         sc = sc_if->msk_softc;
  637         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
  638                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
  639                     RX_VLAN_STRIP_ON);
  640                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
  641                     TX_VLAN_TAG_ON);
  642         } else {
  643                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
  644                     RX_VLAN_STRIP_OFF);
  645                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
  646                     TX_VLAN_TAG_OFF);
  647         }
  648 }
  649 
  650 static int
  651 msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
  652 {
  653         uint16_t idx;
  654         int i;
  655 
  656         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
  657             (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
  658                 /* Wait until controller executes OP_TCPSTART command. */
  659                 for (i = 100; i > 0; i--) {
  660                         DELAY(100);
  661                         idx = CSR_READ_2(sc_if->msk_softc,
  662                             Y2_PREF_Q_ADDR(sc_if->msk_rxq,
  663                             PREF_UNIT_GET_IDX_REG));
  664                         if (idx != 0)
  665                                 break;
  666                 }
  667                 if (i == 0) {
  668                         device_printf(sc_if->msk_if_dev,
  669                             "prefetch unit stuck?\n");
  670                         return (ETIMEDOUT);
  671                 }
  672                 /*
   673                  * Fill the consumed LE with a free buffer. This could
   674                  * be done in the Rx handler, but we don't want to add
   675                  * special-case code to the fast path.
  676                  */
  677                 if (jumbo > 0) {
  678                         if (msk_jumbo_newbuf(sc_if, 0) != 0)
  679                                 return (ENOBUFS);
  680                         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
  681                             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
  682                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  683                 } else {
  684                         if (msk_newbuf(sc_if, 0) != 0)
  685                                 return (ENOBUFS);
  686                         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
  687                             sc_if->msk_cdata.msk_rx_ring_map,
  688                             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  689                 }
  690                 sc_if->msk_cdata.msk_rx_prod = 0;
  691                 CSR_WRITE_2(sc_if->msk_softc,
  692                     Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
  693                     sc_if->msk_cdata.msk_rx_prod);
  694         }
  695         return (0);
  696 }
  697 
  698 static int
  699 msk_init_rx_ring(struct msk_if_softc *sc_if)
  700 {
  701         struct msk_ring_data *rd;
  702         struct msk_rxdesc *rxd;
  703         int i, nbuf, prod;
  704 
  705         MSK_IF_LOCK_ASSERT(sc_if);
  706 
  707         sc_if->msk_cdata.msk_rx_cons = 0;
  708         sc_if->msk_cdata.msk_rx_prod = 0;
  709         sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
  710 
  711         rd = &sc_if->msk_rdata;
  712         bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
  713         for (i = prod = 0; i < MSK_RX_RING_CNT; i++) {
  714                 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
  715                 rxd->rx_m = NULL;
  716                 rxd->rx_le = &rd->msk_rx_ring[prod];
  717                 MSK_INC(prod, MSK_RX_RING_CNT);
  718         }
  719         nbuf = MSK_RX_BUF_CNT;
  720         prod = 0;
   721         /* Tell the controller where to start the Rx checksum. */
  722         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
  723             (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
  724 #ifdef MSK_64BIT_DMA
  725                 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
  726                 rxd->rx_m = NULL;
  727                 rxd->rx_le = &rd->msk_rx_ring[prod];
  728                 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
  729                     ETHER_HDR_LEN);
  730                 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
  731                 MSK_INC(prod, MSK_RX_RING_CNT);
  732                 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
  733 #endif
  734                 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
  735                 rxd->rx_m = NULL;
  736                 rxd->rx_le = &rd->msk_rx_ring[prod];
  737                 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
  738                     ETHER_HDR_LEN);
  739                 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
  740                 MSK_INC(prod, MSK_RX_RING_CNT);
  741                 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
  742                 nbuf--;
  743         }
  744         for (i = 0; i < nbuf; i++) {
  745                 if (msk_newbuf(sc_if, prod) != 0)
  746                         return (ENOBUFS);
  747                 MSK_RX_INC(prod, MSK_RX_RING_CNT);
  748         }
  749 
  750         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
  751             sc_if->msk_cdata.msk_rx_ring_map,
  752             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  753 
  754         /* Update prefetch unit. */
  755         sc_if->msk_cdata.msk_rx_prod = prod;
  756         CSR_WRITE_2(sc_if->msk_softc,
  757             Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
  758             (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) %
  759             MSK_RX_RING_CNT);
  760         if (msk_rx_fill(sc_if, 0) != 0)
  761                 return (ENOBUFS);
  762         return (0);
  763 }
  764 
  765 static int
  766 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
  767 {
  768         struct msk_ring_data *rd;
  769         struct msk_rxdesc *rxd;
  770         int i, nbuf, prod;
  771 
  772         MSK_IF_LOCK_ASSERT(sc_if);
  773 
  774         sc_if->msk_cdata.msk_rx_cons = 0;
  775         sc_if->msk_cdata.msk_rx_prod = 0;
  776         sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
  777 
  778         rd = &sc_if->msk_rdata;
  779         bzero(rd->msk_jumbo_rx_ring,
  780             sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
  781         for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
  782                 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
  783                 rxd->rx_m = NULL;
  784                 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
  785                 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
  786         }
  787         nbuf = MSK_RX_BUF_CNT;
  788         prod = 0;
   789         /* Tell the controller where to start the Rx checksum. */
  790         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
  791             (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
  792 #ifdef MSK_64BIT_DMA
  793                 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
  794                 rxd->rx_m = NULL;
  795                 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
  796                 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
  797                     ETHER_HDR_LEN);
  798                 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
  799                 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
  800                 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
  801 #endif
  802                 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
  803                 rxd->rx_m = NULL;
  804                 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
  805                 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
  806                     ETHER_HDR_LEN);
  807                 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
  808                 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
  809                 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
  810                 nbuf--;
  811         }
  812         for (i = 0; i < nbuf; i++) {
  813                 if (msk_jumbo_newbuf(sc_if, prod) != 0)
  814                         return (ENOBUFS);
  815                 MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT);
  816         }
  817 
  818         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
  819             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
  820             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  821 
  822         /* Update prefetch unit. */
  823         sc_if->msk_cdata.msk_rx_prod = prod;
  824         CSR_WRITE_2(sc_if->msk_softc,
  825             Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
  826             (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) %
  827             MSK_JUMBO_RX_RING_CNT);
  828         if (msk_rx_fill(sc_if, 1) != 0)
  829                 return (ENOBUFS);
  830         return (0);
  831 }
  832 
  833 static void
  834 msk_init_tx_ring(struct msk_if_softc *sc_if)
  835 {
  836         struct msk_ring_data *rd;
  837         struct msk_txdesc *txd;
  838         int i;
  839 
  840         sc_if->msk_cdata.msk_tso_mtu = 0;
  841         sc_if->msk_cdata.msk_last_csum = 0;
  842         sc_if->msk_cdata.msk_tx_prod = 0;
  843         sc_if->msk_cdata.msk_tx_cons = 0;
  844         sc_if->msk_cdata.msk_tx_cnt = 0;
  845         sc_if->msk_cdata.msk_tx_high_addr = 0;
  846 
  847         rd = &sc_if->msk_rdata;
  848         bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
  849         for (i = 0; i < MSK_TX_RING_CNT; i++) {
  850                 txd = &sc_if->msk_cdata.msk_txdesc[i];
  851                 txd->tx_m = NULL;
  852                 txd->tx_le = &rd->msk_tx_ring[i];
  853         }
  854 
  855         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
  856             sc_if->msk_cdata.msk_tx_ring_map,
  857             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  858 }
  859 
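       /*
        * Hand the mbuf already loaded at this slot back to the hardware by
        * re-arming its list element; no replacement buffer is allocated.
        */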
  860 static __inline void
  861 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
  862 {
  863         struct msk_rx_desc *rx_le;
  864         struct msk_rxdesc *rxd;
  865         struct mbuf *m;
  866 
  867 #ifdef MSK_64BIT_DMA
  868         rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  869         rx_le = rxd->rx_le;
  870         rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
  871         MSK_INC(idx, MSK_RX_RING_CNT);
  872 #endif
  873         rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  874         m = rxd->rx_m;
  875         rx_le = rxd->rx_le;
  876         rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
  877 }
  878 
  879 static __inline void
  880 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
  881 {
  882         struct msk_rx_desc *rx_le;
  883         struct msk_rxdesc *rxd;
  884         struct mbuf *m;
  885 
  886 #ifdef MSK_64BIT_DMA
  887         rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
  888         rx_le = rxd->rx_le;
  889         rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
  890         MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
  891 #endif
  892         rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
  893         m = rxd->rx_m;
  894         rx_le = rxd->rx_le;
  895         rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
  896 }
  897 
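       /*
        * Attach a fresh mbuf cluster to Rx slot 'idx'.  The cluster is
        * loaded with the spare DMA map first, so a failed load leaves the
        * old buffer intact; on success the spare and slot maps are swapped.
        */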
  898 static int
  899 msk_newbuf(struct msk_if_softc *sc_if, int idx)
  900 {
  901         struct msk_rx_desc *rx_le;
  902         struct msk_rxdesc *rxd;
  903         struct mbuf *m;
  904         bus_dma_segment_t segs[1];
  905         bus_dmamap_t map;
  906         int nsegs;
  907 
  908         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
  909         if (m == NULL)
  910                 return (ENOBUFS);
  911 
  912         m->m_len = m->m_pkthdr.len = MCLBYTES;
  913         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
  914                 m_adj(m, ETHER_ALIGN);
  915 #ifndef __NO_STRICT_ALIGNMENT
  916         else
  917                 m_adj(m, MSK_RX_BUF_ALIGN);
  918 #endif
  919 
  920         if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
  921             sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
  922             BUS_DMA_NOWAIT) != 0) {
  923                 m_freem(m);
  924                 return (ENOBUFS);
  925         }
  926         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
  927 
  928         rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  929 #ifdef MSK_64BIT_DMA
  930         rx_le = rxd->rx_le;
  931         rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
  932         rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
  933         MSK_INC(idx, MSK_RX_RING_CNT);
  934         rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
  935 #endif
  936         if (rxd->rx_m != NULL) {
  937                 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
  938                     BUS_DMASYNC_POSTREAD);
  939                 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
  940                 rxd->rx_m = NULL;
  941         }
  942         map = rxd->rx_dmamap;
  943         rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
  944         sc_if->msk_cdata.msk_rx_sparemap = map;
  945         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
  946             BUS_DMASYNC_PREREAD);
  947         rxd->rx_m = m;
  948         rx_le = rxd->rx_le;
  949         rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
  950         rx_le->msk_control =
  951             htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
  952 
  953         return (0);
  954 }
  955 
  956 static int
  957 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
  958 {
  959         struct msk_rx_desc *rx_le;
  960         struct msk_rxdesc *rxd;
  961         struct mbuf *m;
  962         bus_dma_segment_t segs[1];
  963         bus_dmamap_t map;
  964         int nsegs;
  965 
  966         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
  967         if (m == NULL)
  968                 return (ENOBUFS);
  969         m->m_len = m->m_pkthdr.len = MJUM9BYTES;
  970         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
  971                 m_adj(m, ETHER_ALIGN);
  972 #ifndef __NO_STRICT_ALIGNMENT
  973         else
  974                 m_adj(m, MSK_RX_BUF_ALIGN);
  975 #endif
  976 
  977         if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
  978             sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
  979             BUS_DMA_NOWAIT) != 0) {
  980                 m_freem(m);
  981                 return (ENOBUFS);
  982         }
  983         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
  984 
  985         rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
  986 #ifdef MSK_64BIT_DMA
  987         rx_le = rxd->rx_le;
  988         rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr));
  989         rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
  990         MSK_INC(idx, MSK_JUMBO_RX_RING_CNT);
  991         rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
  992 #endif
  993         if (rxd->rx_m != NULL) {
  994                 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
  995                     rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
  996                 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
  997                     rxd->rx_dmamap);
  998                 rxd->rx_m = NULL;
  999         }
 1000         map = rxd->rx_dmamap;
 1001         rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
 1002         sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
 1003         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
 1004             BUS_DMASYNC_PREREAD);
 1005         rxd->rx_m = m;
 1006         rx_le = rxd->rx_le;
 1007         rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
 1008         rx_le->msk_control =
 1009             htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
 1010 
 1011         return (0);
 1012 }
 1013 
 1014 /*
 1015  * Set media options.
 1016  */
 1017 static int
 1018 msk_mediachange(struct ifnet *ifp)
 1019 {
 1020         struct msk_if_softc *sc_if;
 1021         struct mii_data *mii;
 1022         int error;
 1023 
 1024         sc_if = ifp->if_softc;
 1025 
 1026         MSK_IF_LOCK(sc_if);
 1027         mii = device_get_softc(sc_if->msk_miibus);
 1028         error = mii_mediachg(mii);
 1029         MSK_IF_UNLOCK(sc_if);
 1030 
 1031         return (error);
 1032 }
 1033 
 1034 /*
 1035  * Report current media status.
 1036  */
 1037 static void
 1038 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1039 {
 1040         struct msk_if_softc *sc_if;
 1041         struct mii_data *mii;
 1042 
 1043         sc_if = ifp->if_softc;
 1044         MSK_IF_LOCK(sc_if);
 1045         if ((ifp->if_flags & IFF_UP) == 0) {
 1046                 MSK_IF_UNLOCK(sc_if);
 1047                 return;
 1048         }
 1049         mii = device_get_softc(sc_if->msk_miibus);
 1050 
 1051         mii_pollstat(mii);
 1052         ifmr->ifm_active = mii->mii_media_active;
 1053         ifmr->ifm_status = mii->mii_media_status;
 1054         MSK_IF_UNLOCK(sc_if);
 1055 }
 1056 
 1057 static int
 1058 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 1059 {
 1060         struct msk_if_softc *sc_if;
 1061         struct ifreq *ifr;
 1062         struct mii_data *mii;
 1063         int error, mask, reinit;
 1064 
 1065         sc_if = ifp->if_softc;
 1066         ifr = (struct ifreq *)data;
 1067         error = 0;
 1068 
 1069         switch(command) {
 1070         case SIOCSIFMTU:
 1071                 MSK_IF_LOCK(sc_if);
 1072                 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
 1073                         error = EINVAL;
 1074                 else if (ifp->if_mtu != ifr->ifr_mtu) {
 1075                         if (ifr->ifr_mtu > ETHERMTU) {
 1076                                 if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
 1077                                         error = EINVAL;
 1078                                         MSK_IF_UNLOCK(sc_if);
 1079                                         break;
 1080                                 }
 1081                                 if ((sc_if->msk_flags &
 1082                                     MSK_FLAG_JUMBO_NOCSUM) != 0) {
 1083                                         ifp->if_hwassist &=
 1084                                             ~(MSK_CSUM_FEATURES | CSUM_TSO);
 1085                                         ifp->if_capenable &=
 1086                                             ~(IFCAP_TSO4 | IFCAP_TXCSUM);
 1087                                         VLAN_CAPABILITIES(ifp);
 1088                                 }
 1089                         }
 1090                         ifp->if_mtu = ifr->ifr_mtu;
 1091                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1092                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1093                                 msk_init_locked(sc_if);
 1094                         }
 1095                 }
 1096                 MSK_IF_UNLOCK(sc_if);
 1097                 break;
 1098         case SIOCSIFFLAGS:
 1099                 MSK_IF_LOCK(sc_if);
 1100                 if ((ifp->if_flags & IFF_UP) != 0) {
 1101                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1102                             ((ifp->if_flags ^ sc_if->msk_if_flags) &
 1103                             (IFF_PROMISC | IFF_ALLMULTI)) != 0)
 1104                                 msk_rxfilter(sc_if);
 1105                         else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
 1106                                 msk_init_locked(sc_if);
 1107                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1108                         msk_stop(sc_if);
 1109                 sc_if->msk_if_flags = ifp->if_flags;
 1110                 MSK_IF_UNLOCK(sc_if);
 1111                 break;
 1112         case SIOCADDMULTI:
 1113         case SIOCDELMULTI:
 1114                 MSK_IF_LOCK(sc_if);
 1115                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1116                         msk_rxfilter(sc_if);
 1117                 MSK_IF_UNLOCK(sc_if);
 1118                 break;
 1119         case SIOCGIFMEDIA:
 1120         case SIOCSIFMEDIA:
 1121                 mii = device_get_softc(sc_if->msk_miibus);
 1122                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
 1123                 break;
 1124         case SIOCSIFCAP:
 1125                 reinit = 0;
 1126                 MSK_IF_LOCK(sc_if);
 1127                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 1128                 if ((mask & IFCAP_TXCSUM) != 0 &&
 1129                     (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
 1130                         ifp->if_capenable ^= IFCAP_TXCSUM;
 1131                         if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
 1132                                 ifp->if_hwassist |= MSK_CSUM_FEATURES;
 1133                         else
 1134                                 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
 1135                 }
 1136                 if ((mask & IFCAP_RXCSUM) != 0 &&
 1137                     (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
 1138                         ifp->if_capenable ^= IFCAP_RXCSUM;
 1139                         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
 1140                                 reinit = 1;
 1141                 }
 1142                 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
 1143                     (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
 1144                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
 1145                 if ((mask & IFCAP_TSO4) != 0 &&
 1146                     (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
 1147                         ifp->if_capenable ^= IFCAP_TSO4;
 1148                         if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
 1149                                 ifp->if_hwassist |= CSUM_TSO;
 1150                         else
 1151                                 ifp->if_hwassist &= ~CSUM_TSO;
 1152                 }
 1153                 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
 1154                     (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
 1155                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
 1156                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 1157                     (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
 1158                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 1159                         if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
 1160                                 ifp->if_capenable &=
 1161                                     ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
 1162                         msk_setvlan(sc_if, ifp);
 1163                 }
 1164                 if (ifp->if_mtu > ETHERMTU &&
 1165                     (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
 1166                         ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
 1167                         ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
 1168                 }
 1169                 VLAN_CAPABILITIES(ifp);
 1170                 if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
 1171                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 1172                         msk_init_locked(sc_if);
 1173                 }
 1174                 MSK_IF_UNLOCK(sc_if);
 1175                 break;
 1176         default:
 1177                 error = ether_ioctl(ifp, command, data);
 1178                 break;
 1179         }
 1180 
 1181         return (error);
 1182 }
 1183 
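       /*
        * Probe: match the PCI vendor and device ID against the product
        * table above.
        */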
 1184 static int
 1185 mskc_probe(device_t dev)
 1186 {
 1187         const struct msk_product *mp;
 1188         uint16_t vendor, devid;
 1189         int i;
 1190 
 1191         vendor = pci_get_vendor(dev);
 1192         devid = pci_get_device(dev);
 1193         mp = msk_products;
 1194         for (i = 0; i < nitems(msk_products); i++, mp++) {
 1195                 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
 1196                         device_set_desc(dev, mp->msk_name);
 1197                         return (BUS_PROBE_DEFAULT);
 1198                 }
 1199         }
 1200 
 1201         return (ENXIO);
 1202 }
 1203 
 1204 static int
 1205 mskc_setup_rambuffer(struct msk_softc *sc)
 1206 {
 1207         int next;
 1208         int i;
 1209 
 1210         /* Get adapter SRAM size. */
 1211         sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
 1212         if (bootverbose)
 1213                 device_printf(sc->msk_dev,
 1214                     "RAM buffer size : %dKB\n", sc->msk_ramsize);
 1215         if (sc->msk_ramsize == 0)
 1216                 return (0);
 1217 
 1218         sc->msk_pflags |= MSK_FLAG_RAMBUF;
 1219         /*
  1220          * Give the receiver 2/3 of the memory and round down to a
  1221          * multiple of 1024.  The Tx/Rx RAM buffer size of the Yukon II
  1222          * should be a multiple of 1024.
 1223          */
 1224         sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
 1225         sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
 1226         for (i = 0, next = 0; i < sc->msk_num_port; i++) {
 1227                 sc->msk_rxqstart[i] = next;
 1228                 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
 1229                 next = sc->msk_rxqend[i] + 1;
 1230                 sc->msk_txqstart[i] = next;
 1231                 sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
 1232                 next = sc->msk_txqend[i] + 1;
 1233                 if (bootverbose) {
 1234                         device_printf(sc->msk_dev,
 1235                             "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
 1236                             sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
 1237                             sc->msk_rxqend[i]);
 1238                         device_printf(sc->msk_dev,
 1239                             "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
 1240                             sc->msk_txqsize / 1024, sc->msk_txqstart[i],
 1241                             sc->msk_txqend[i]);
 1242                 }
 1243         }
 1244 
 1245         return (0);
 1246 }
 1247 
 1248 static void
 1249 msk_phy_power(struct msk_softc *sc, int mode)
 1250 {
 1251         uint32_t our, val;
 1252         int i;
 1253 
 1254         switch (mode) {
 1255         case MSK_PHY_POWERUP:
  1256                 /* Switch power to VCC (workaround for VAUX problem). */
 1257                 CSR_WRITE_1(sc, B0_POWER_CTRL,
 1258                     PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
 1259                 /* Disable Core Clock Division, set Clock Select to 0. */
 1260                 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
 1261 
 1262                 val = 0;
 1263                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1264                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1265                         /* Enable bits are inverted. */
 1266                         val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 1267                               Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 1268                               Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
 1269                 }
 1270                 /*
 1271                  * Enable PCI & Core Clock, enable clock gating for both Links.
 1272                  */
 1273                 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
 1274 
 1275                 our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
 1276                 our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
 1277                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
 1278                         if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1279                                 /* Deassert Low Power for 1st PHY. */
 1280                                 our |= PCI_Y2_PHY1_COMA;
 1281                                 if (sc->msk_num_port > 1)
 1282                                         our |= PCI_Y2_PHY2_COMA;
 1283                         }
 1284                 }
 1285                 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
 1286                     sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1287                     sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
 1288                         val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
 1289                         val &= (PCI_FORCE_ASPM_REQUEST |
 1290                             PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
 1291                             PCI_ASPM_CLKRUN_REQUEST);
 1292                         /* Set all bits to 0 except bits 15..12. */
 1293                         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
 1294                         val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
 1295                         val &= PCI_CTL_TIM_VMAIN_AV_MSK;
 1296                         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
 1297                         CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
 1298                         CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
 1299                         /*
 1300                          * Disable status race, workaround for
 1301                          * Yukon EC Ultra & Yukon EX.
 1302                          */
 1303                         val = CSR_READ_4(sc, B2_GP_IO);
 1304                         val |= GLB_GPIO_STAT_RACE_DIS;
 1305                         CSR_WRITE_4(sc, B2_GP_IO, val);
 1306                         CSR_READ_4(sc, B2_GP_IO);
 1307                 }
 1308                 /* Release PHY from PowerDown/COMA mode. */
 1309                 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);
 1310 
 1311                 for (i = 0; i < sc->msk_num_port; i++) {
 1312                         CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
 1313                             GMLC_RST_SET);
 1314                         CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
 1315                             GMLC_RST_CLR);
 1316                 }
 1317                 break;
 1318         case MSK_PHY_POWERDOWN:
 1319                 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
 1320                 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
 1321                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1322                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1323                         val &= ~PCI_Y2_PHY1_COMA;
 1324                         if (sc->msk_num_port > 1)
 1325                                 val &= ~PCI_Y2_PHY2_COMA;
 1326                 }
 1327                 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
 1328 
 1329                 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 1330                       Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 1331                       Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
 1332                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1333                     sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
 1334                         /* Enable bits are inverted. */
 1335                         val = 0;
 1336                 }
 1337                 /*
 1338                  * Disable PCI & Core Clock, disable clock gating for
 1339                  * both Links.
 1340                  */
 1341                 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
 1342                 CSR_WRITE_1(sc, B0_POWER_CTRL,
 1343                     PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
 1344                 break;
 1345         default:
 1346                 break;
 1347         }
 1348 }
 1349 
 1350 static void
 1351 mskc_reset(struct msk_softc *sc)
 1352 {
 1353         bus_addr_t addr;
 1354         uint16_t status;
 1355         uint32_t val;
 1356         int i, initram;
 1357 
 1358         /* Disable ASF. */
 1359         if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
 1360             sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
 1361                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1362                     sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 1363                         CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
 1364                         status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
 1365                         /* Clear AHB bridge & microcontroller reset. */
 1366                         status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
 1367                             Y2_ASF_HCU_CCSR_CPU_RST_MODE);
 1368                         /* Clear ASF microcontroller state. */
 1369                         status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
 1370                         status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
 1371                         CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
 1372                         CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
 1373                 } else
 1374                         CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
 1375                 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
 1376                 /*
 1377                  * Since we disabled ASF, S/W reset is required for
 1378                  * Power Management.
 1379                  */
 1380                 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 1381                 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1382         }
 1383 
 1384         /* Clear all error bits in the PCI status register. */
 1385         status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 1386         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1387 
 1388         pci_write_config(sc->msk_dev, PCIR_STATUS, status |
 1389             PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 1390             PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
 1391         CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
 1392 
 1393         switch (sc->msk_bustype) {
 1394         case MSK_PEX_BUS:
 1395                 /* Clear all PEX errors. */
 1396                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 1397                 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 1398                 if ((val & PEX_RX_OV) != 0) {
 1399                         sc->msk_intrmask &= ~Y2_IS_HW_ERR;
 1400                         sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 1401                 }
 1402                 break;
 1403         case MSK_PCI_BUS:
 1404         case MSK_PCIX_BUS:
  1405                 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
 1406                 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
 1407                 if (val == 0)
 1408                         pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
 1409                 if (sc->msk_bustype == MSK_PCIX_BUS) {
 1410                         /* Set Cache Line Size opt. */
 1411                         val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
 1412                         val |= PCI_CLS_OPT;
 1413                         pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
 1414                 }
 1415                 break;
 1416         }
 1417         /* Set PHY power state. */
 1418         msk_phy_power(sc, MSK_PHY_POWERUP);
 1419 
  1420         /* Reset GPHY/GMAC Control. */
 1421         for (i = 0; i < sc->msk_num_port; i++) {
 1422                 /* GPHY Control reset. */
 1423                 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
 1424                 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
 1425                 /* GMAC Control reset. */
 1426                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
 1427                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
 1428                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
 1429                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1430                     sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
 1431                         CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
 1432                             GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 1433                             GMC_BYP_RETR_ON);
 1434         }
 1435 
 1436         if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
 1437             sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
 1438                 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
 1439         if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
  1440                 /* Disable PCIe PHY powerdown (reg 0x80, bit 7). */
 1441                 CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
 1442         }
 1443         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1444 
 1445         /* LED On. */
 1446         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
 1447 
 1448         /* Clear TWSI IRQ. */
 1449         CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
 1450 
 1451         /* Turn off hardware timer. */
 1452         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
 1453         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
 1454 
 1455         /* Turn off descriptor polling. */
 1456         CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
 1457 
 1458         /* Turn off time stamps. */
 1459         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
 1460         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 1461 
 1462         initram = 0;
 1463         if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
 1464             sc->msk_hw_id == CHIP_ID_YUKON_EC ||
 1465             sc->msk_hw_id == CHIP_ID_YUKON_FE)
 1466                 initram++;
 1467 
 1468         /* Configure timeout values. */
 1469         for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
 1470                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
 1471                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
 1472                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
 1473                     MSK_RI_TO_53);
 1474                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
 1475                     MSK_RI_TO_53);
 1476                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
 1477                     MSK_RI_TO_53);
 1478                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
 1479                     MSK_RI_TO_53);
 1480                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
 1481                     MSK_RI_TO_53);
 1482                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
 1483                     MSK_RI_TO_53);
 1484                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
 1485                     MSK_RI_TO_53);
 1486                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
 1487                     MSK_RI_TO_53);
 1488                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
 1489                     MSK_RI_TO_53);
 1490                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
 1491                     MSK_RI_TO_53);
 1492                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
 1493                     MSK_RI_TO_53);
 1494                 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
 1495                     MSK_RI_TO_53);
 1496         }
 1497 
 1498         /* Disable all interrupts. */
 1499         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 1500         CSR_READ_4(sc, B0_HWE_IMSK);
 1501         CSR_WRITE_4(sc, B0_IMSK, 0);
 1502         CSR_READ_4(sc, B0_IMSK);
 1503 
  1504         /*
  1505          * On dual-port PCI-X cards, there is a problem where status
  1506          * updates can arrive out of order due to split transactions.
  1507          */
 1508         if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
 1509                 uint16_t pcix_cmd;
 1510 
 1511                 pcix_cmd = pci_read_config(sc->msk_dev,
 1512                     sc->msk_pcixcap + PCIXR_COMMAND, 2);
 1513                 /* Clear Max Outstanding Split Transactions. */
 1514                 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
 1515                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1516                 pci_write_config(sc->msk_dev,
 1517                     sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
 1518                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1519         }
 1520         if (sc->msk_expcap != 0) {
 1521                 /* Change Max. Read Request Size to 2048 bytes. */
 1522                 if (pci_get_max_read_req(sc->msk_dev) == 512)
 1523                         pci_set_max_read_req(sc->msk_dev, 2048);
 1524         }
 1525 
 1526         /* Clear status list. */
 1527         bzero(sc->msk_stat_ring,
 1528             sizeof(struct msk_stat_desc) * sc->msk_stat_count);
 1529         sc->msk_stat_cons = 0;
 1530         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 1531             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1532         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
 1533         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
 1534         /* Set the status list base address. */
 1535         addr = sc->msk_stat_ring_paddr;
 1536         CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
 1537         CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
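               /*
                * MSK_ADDR_LO()/MSK_ADDR_HI() presumably split the 64-bit bus
                * address into its low and high 32-bit halves for the two
                * writes above.
                */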
 1538         /* Set the status list last index. */
 1539         CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1);
 1540         if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
 1541             sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
 1542                 /* WA for dev. #4.3 */
 1543                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
 1544                 /* WA for dev. #4.18 */
 1545                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
 1546                 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
 1547         } else {
 1548                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
 1549                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
 1550                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1551                     sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
 1552                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
 1553                 else
 1554                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
 1555                 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
 1556         }
 1557         /*
 1558          * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
 1559          */
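               /*
                * Note (assumption): MSK_USECS() appears to convert
                * microseconds into core clock ticks using sc->msk_clock
                * (in MHz) as set up in mskc_attach(), so 1000us on a 125MHz
                * part would program roughly 125000 ticks below.
                */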
 1560         CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
 1561 
 1562         /* Enable status unit. */
 1563         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
 1564 
 1565         CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
 1566         CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
 1567         CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
 1568 }
 1569 
 1570 static int
 1571 msk_probe(device_t dev)
 1572 {
 1573         struct msk_softc *sc;
 1574         char desc[100];
 1575 
 1576         sc = device_get_softc(device_get_parent(dev));
 1577         /*
 1578          * Not much to do here. We always know there will be
 1579          * at least one GMAC present, and if there are two,
 1580          * mskc_attach() will create a second device instance
 1581          * for us.
 1582          */
 1583         snprintf(desc, sizeof(desc),
 1584             "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
 1585             model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
 1586             sc->msk_hw_rev);
 1587         device_set_desc_copy(dev, desc);
 1588 
 1589         return (BUS_PROBE_DEFAULT);
 1590 }
 1591 
 1592 static int
 1593 msk_attach(device_t dev)
 1594 {
 1595         struct msk_softc *sc;
 1596         struct msk_if_softc *sc_if;
 1597         struct ifnet *ifp;
 1598         struct msk_mii_data *mmd;
 1599         int i, port, error;
 1600         uint8_t eaddr[6];
 1601 
 1602         if (dev == NULL)
 1603                 return (EINVAL);
 1604 
 1605         error = 0;
 1606         sc_if = device_get_softc(dev);
 1607         sc = device_get_softc(device_get_parent(dev));
 1608         mmd = device_get_ivars(dev);
 1609         port = mmd->port;
 1610 
 1611         sc_if->msk_if_dev = dev;
 1612         sc_if->msk_port = port;
 1613         sc_if->msk_softc = sc;
 1614         sc_if->msk_flags = sc->msk_pflags;
 1615         sc->msk_if[port] = sc_if;
 1616         /* Setup Tx/Rx queue register offsets. */
 1617         if (port == MSK_PORT_A) {
 1618                 sc_if->msk_txq = Q_XA1;
 1619                 sc_if->msk_txsq = Q_XS1;
 1620                 sc_if->msk_rxq = Q_R1;
 1621         } else {
 1622                 sc_if->msk_txq = Q_XA2;
 1623                 sc_if->msk_txsq = Q_XS2;
 1624                 sc_if->msk_rxq = Q_R2;
 1625         }
 1626 
 1627         callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
 1628         msk_sysctl_node(sc_if);
 1629 
 1630         if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
 1631                 goto fail;
 1632         msk_rx_dma_jalloc(sc_if);
 1633 
 1634         ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
 1635         if (ifp == NULL) {
 1636                 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
 1637                 error = ENOSPC;
 1638                 goto fail;
 1639         }
 1640         ifp->if_softc = sc_if;
 1641         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1642         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1643         ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
  1644         /*
  1645          * Enable Rx checksum offloading if the controller supports
  1646          * the new descriptor format and is not a Yukon XL.
  1647          */
 1648         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 1649             sc->msk_hw_id != CHIP_ID_YUKON_XL)
 1650                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1651         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
 1652             (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
 1653                 ifp->if_capabilities |= IFCAP_RXCSUM;
 1654         ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
 1655         ifp->if_capenable = ifp->if_capabilities;
 1656         ifp->if_ioctl = msk_ioctl;
 1657         ifp->if_start = msk_start;
 1658         ifp->if_init = msk_init;
 1659         IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
 1660         ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
 1661         IFQ_SET_READY(&ifp->if_snd);
 1662         /*
 1663          * Get station address for this interface. Note that
 1664          * dual port cards actually come with three station
 1665          * addresses: one for each port, plus an extra. The
 1666          * extra one is used by the SysKonnect driver software
 1667          * as a 'virtual' station address for when both ports
 1668          * are operating in failover mode. Currently we don't
 1669          * use this extra address.
 1670          */
 1671         MSK_IF_LOCK(sc_if);
 1672         for (i = 0; i < ETHER_ADDR_LEN; i++)
 1673                 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
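               /*
                * Each port's station address is presumably stored at an
                * 8-byte stride: port A reads B2_MAC_1 + 0..5 and port B
                * reads B2_MAC_1 + 8..13, per the (port * 8) offset above.
                */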
 1674 
 1675         /*
 1676          * Call MI attach routine.  Can't hold locks when calling into ether_*.
 1677          */
 1678         MSK_IF_UNLOCK(sc_if);
 1679         ether_ifattach(ifp, eaddr);
 1680         MSK_IF_LOCK(sc_if);
 1681 
 1682         /* VLAN capability setup */
 1683         ifp->if_capabilities |= IFCAP_VLAN_MTU;
 1684         if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
  1685                 /*
  1686                  * Due to Tx checksum offload hardware bugs, msk(4) manually
  1687                  * computes the checksum for short frames. For VLAN-tagged
  1688                  * frames this workaround does not work, so disable checksum
  1689                  * offloading for the VLAN interface.
  1690                  */
 1691                 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
  1692                 /*
  1693                  * Enable Rx checksum offloading for VLAN-tagged frames
  1694                  * if the controller supports the new descriptor format.
  1695                  */
 1696                 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
 1697                     (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
 1698                         ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
 1699         }
 1700         ifp->if_capenable = ifp->if_capabilities;
  1701         /*
  1702          * Disable Rx checksum offloading on controllers that don't use
  1703          * the new descriptor format, but let the user re-enable it.
  1704          */
 1705         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
 1706                 ifp->if_capenable &= ~IFCAP_RXCSUM;
 1707 
 1708         /*
 1709          * Tell the upper layer(s) we support long frames.
 1710          * Must appear after the call to ether_ifattach() because
 1711          * ether_ifattach() sets ifi_hdrlen to the default value.
 1712          */
 1713         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
 1714 
 1715         /*
 1716          * Do miibus setup.
 1717          */
 1718         MSK_IF_UNLOCK(sc_if);
 1719         error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange,
 1720             msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY,
 1721             mmd->mii_flags);
 1722         if (error != 0) {
 1723                 device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n");
 1724                 ether_ifdetach(ifp);
 1725                 error = ENXIO;
 1726                 goto fail;
 1727         }
 1728 
 1729 fail:
 1730         if (error != 0) {
  1731                 /* Access should be OK even though the lock has been dropped. */
 1732                 sc->msk_if[port] = NULL;
 1733                 msk_detach(dev);
 1734         }
 1735 
 1736         return (error);
 1737 }
 1738 
 1739 /*
 1740  * Attach the interface. Allocate softc structures, do ifmedia
 1741  * setup and ethernet/BPF attach.
 1742  */
 1743 static int
 1744 mskc_attach(device_t dev)
 1745 {
 1746         struct msk_softc *sc;
 1747         struct msk_mii_data *mmd;
 1748         int error, msic, msir, reg;
 1749 
 1750         sc = device_get_softc(dev);
 1751         sc->msk_dev = dev;
 1752         mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
 1753             MTX_DEF);
 1754 
 1755         /*
 1756          * Map control/status registers.
 1757          */
 1758         pci_enable_busmaster(dev);
 1759 
 1760         /* Allocate I/O resource */
 1761 #ifdef MSK_USEIOSPACE
 1762         sc->msk_res_spec = msk_res_spec_io;
 1763 #else
 1764         sc->msk_res_spec = msk_res_spec_mem;
 1765 #endif
 1766         sc->msk_irq_spec = msk_irq_spec_legacy;
 1767         error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
 1768         if (error) {
 1769                 if (sc->msk_res_spec == msk_res_spec_mem)
 1770                         sc->msk_res_spec = msk_res_spec_io;
 1771                 else
 1772                         sc->msk_res_spec = msk_res_spec_mem;
 1773                 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
 1774                 if (error) {
 1775                         device_printf(dev, "couldn't allocate %s resources\n",
 1776                             sc->msk_res_spec == msk_res_spec_mem ? "memory" :
 1777                             "I/O");
 1778                         mtx_destroy(&sc->msk_mtx);
 1779                         return (ENXIO);
 1780                 }
 1781         }
 1782 
 1783         /* Enable all clocks before accessing any registers. */
 1784         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
 1785 
 1786         CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1787         sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
 1788         sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
 1789         /* Bail out if chip is not recognized. */
 1790         if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
 1791             sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
 1792             sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
 1793                 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
 1794                     sc->msk_hw_id, sc->msk_hw_rev);
 1795                 mtx_destroy(&sc->msk_mtx);
 1796                 return (ENXIO);
 1797         }
 1798 
 1799         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 1800             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
 1801             OID_AUTO, "process_limit",
 1802             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
 1803             &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
 1804             "max number of Rx events to process");
 1805 
 1806         sc->msk_process_limit = MSK_PROC_DEFAULT;
 1807         error = resource_int_value(device_get_name(dev), device_get_unit(dev),
 1808             "process_limit", &sc->msk_process_limit);
 1809         if (error == 0) {
 1810                 if (sc->msk_process_limit < MSK_PROC_MIN ||
 1811                     sc->msk_process_limit > MSK_PROC_MAX) {
 1812                         device_printf(dev, "process_limit value out of range; "
 1813                             "using default: %d\n", MSK_PROC_DEFAULT);
 1814                         sc->msk_process_limit = MSK_PROC_DEFAULT;
 1815                 }
 1816         }
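               /*
                * The default can also be overridden with a device hint,
                * e.g. hint.mskc.0.process_limit="150" (hypothetical unit
                * and value), which the resource_int_value() lookup above
                * picks up from the kernel environment.
                */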
 1817 
 1818         sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT;
 1819         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
 1820             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 1821             "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0,
  1822             "Maximum amount of time to delay interrupts");
 1823         resource_int_value(device_get_name(dev), device_get_unit(dev),
 1824             "int_holdoff", &sc->msk_int_holdoff);
 1825 
 1826         sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
 1827         /* Check number of MACs. */
 1828         sc->msk_num_port = 1;
 1829         if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
 1830             CFG_DUAL_MAC_MSK) {
 1831                 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
 1832                         sc->msk_num_port++;
 1833         }
 1834 
 1835         /* Check bus type. */
 1836         if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
 1837                 sc->msk_bustype = MSK_PEX_BUS;
 1838                 sc->msk_expcap = reg;
 1839         } else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
 1840                 sc->msk_bustype = MSK_PCIX_BUS;
 1841                 sc->msk_pcixcap = reg;
 1842         } else
 1843                 sc->msk_bustype = MSK_PCI_BUS;
 1844 
 1845         switch (sc->msk_hw_id) {
 1846         case CHIP_ID_YUKON_EC:
 1847                 sc->msk_clock = 125;    /* 125 MHz */
 1848                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1849                 break;
 1850         case CHIP_ID_YUKON_EC_U:
 1851                 sc->msk_clock = 125;    /* 125 MHz */
 1852                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
 1853                 break;
 1854         case CHIP_ID_YUKON_EX:
 1855                 sc->msk_clock = 125;    /* 125 MHz */
 1856                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
 1857                     MSK_FLAG_AUTOTX_CSUM;
  1858                 /*
  1859                  * Yukon Extreme seems to have a silicon bug in its
  1860                  * automatic Tx checksum calculation capability.
  1861                  */
 1862                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
 1863                         sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
  1864                 /*
  1865                  * Yukon Extreme A0 cannot use store-and-forward for
  1866                  * jumbo frames, so disable Tx checksum offloading
  1867                  * for them.
  1868                  */
 1869                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
 1870                         sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
 1871                 break;
 1872         case CHIP_ID_YUKON_FE:
 1873                 sc->msk_clock = 100;    /* 100 MHz */
 1874                 sc->msk_pflags |= MSK_FLAG_FASTETHER;
 1875                 break;
 1876         case CHIP_ID_YUKON_FE_P:
 1877                 sc->msk_clock = 50;     /* 50 MHz */
 1878                 sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
 1879                     MSK_FLAG_AUTOTX_CSUM;
 1880                 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
  1881                         /*
  1882                          * XXX
  1883                          * FE+ A0 has a status LE writeback bug, so msk(4)
  1884                          * does not rely on the status word of received
  1885                          * frames in msk_rxeof(). This in turn disables all
  1886                          * hardware assistance bits reported by the status
  1887                          * word, as well as validity checks of the received
  1888                          * frame. Just pass received frames to the upper
  1889                          * stack with minimal tests and let it handle them.
  1890                          */
 1891                         sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
 1892                             MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
 1893                 }
 1894                 break;
 1895         case CHIP_ID_YUKON_XL:
 1896                 sc->msk_clock = 156;    /* 156 MHz */
 1897                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1898                 break;
 1899         case CHIP_ID_YUKON_SUPR:
 1900                 sc->msk_clock = 125;    /* 125 MHz */
 1901                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
 1902                     MSK_FLAG_AUTOTX_CSUM;
 1903                 break;
 1904         case CHIP_ID_YUKON_UL_2:
 1905                 sc->msk_clock = 125;    /* 125 MHz */
 1906                 sc->msk_pflags |= MSK_FLAG_JUMBO;
 1907                 break;
 1908         case CHIP_ID_YUKON_OPT:
 1909                 sc->msk_clock = 125;    /* 125 MHz */
 1910                 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2;
 1911                 break;
 1912         default:
 1913                 sc->msk_clock = 156;    /* 156 MHz */
 1914                 break;
 1915         }
 1916 
 1917         /* Allocate IRQ resources. */
 1918         msic = pci_msi_count(dev);
 1919         if (bootverbose)
 1920                 device_printf(dev, "MSI count : %d\n", msic);
 1921         if (legacy_intr != 0)
 1922                 msi_disable = 1;
 1923         if (msi_disable == 0 && msic > 0) {
 1924                 msir = 1;
 1925                 if (pci_alloc_msi(dev, &msir) == 0) {
 1926                         if (msir == 1) {
 1927                                 sc->msk_pflags |= MSK_FLAG_MSI;
 1928                                 sc->msk_irq_spec = msk_irq_spec_msi;
 1929                         } else
 1930                                 pci_release_msi(dev);
 1931                 }
 1932         }
 1933 
 1934         error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
 1935         if (error) {
 1936                 device_printf(dev, "couldn't allocate IRQ resources\n");
 1937                 goto fail;
 1938         }
 1939 
 1940         if ((error = msk_status_dma_alloc(sc)) != 0)
 1941                 goto fail;
 1942 
 1943         /* Set base interrupt mask. */
 1944         sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
 1945         sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
 1946             Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
 1947 
 1948         /* Reset the adapter. */
 1949         mskc_reset(sc);
 1950 
 1951         if ((error = mskc_setup_rambuffer(sc)) != 0)
 1952                 goto fail;
 1953 
 1954         sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
 1955         if (sc->msk_devs[MSK_PORT_A] == NULL) {
 1956                 device_printf(dev, "failed to add child for PORT_A\n");
 1957                 error = ENXIO;
 1958                 goto fail;
 1959         }
 1960         mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
 1961         mmd->port = MSK_PORT_A;
 1962         mmd->pmd = sc->msk_pmd;
 1963         mmd->mii_flags |= MIIF_DOPAUSE;
 1964         if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
 1965                 mmd->mii_flags |= MIIF_HAVEFIBER;
 1966         if (sc->msk_pmd == 'P')
 1967                 mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
 1968         device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);
 1969 
 1970         if (sc->msk_num_port > 1) {
 1971                 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
 1972                 if (sc->msk_devs[MSK_PORT_B] == NULL) {
 1973                         device_printf(dev, "failed to add child for PORT_B\n");
 1974                         error = ENXIO;
 1975                         goto fail;
 1976                 }
 1977                 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
 1978                     M_ZERO);
 1979                 mmd->port = MSK_PORT_B;
 1980                 mmd->pmd = sc->msk_pmd;
 1981                 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
 1982                         mmd->mii_flags |= MIIF_HAVEFIBER;
 1983                 if (sc->msk_pmd == 'P')
 1984                         mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
 1985                 device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
 1986         }
 1987 
 1988         error = bus_generic_attach(dev);
 1989         if (error) {
 1990                 device_printf(dev, "failed to attach port(s)\n");
 1991                 goto fail;
 1992         }
 1993 
 1994         /* Hook interrupt last to avoid having to lock softc. */
 1995         error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
 1996             INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand);
 1997         if (error != 0) {
 1998                 device_printf(dev, "couldn't set up interrupt handler\n");
 1999                 goto fail;
 2000         }
 2001 fail:
 2002         if (error != 0)
 2003                 mskc_detach(dev);
 2004 
 2005         return (error);
 2006 }
 2007 
  2008 /*
  2009  * Shut down the hardware and free up resources. This can be called
  2010  * any time after the mutex has been initialized. It is called in
  2011  * both the error case in attach and the normal detach case, so it
  2012  * needs to be careful about only freeing resources that have
  2013  * actually been allocated.
  2014  */
 2015 static int
 2016 msk_detach(device_t dev)
 2017 {
 2018         struct msk_softc *sc;
 2019         struct msk_if_softc *sc_if;
 2020         struct ifnet *ifp;
 2021 
 2022         sc_if = device_get_softc(dev);
 2023         KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
 2024             ("msk mutex not initialized in msk_detach"));
 2025         MSK_IF_LOCK(sc_if);
 2026 
 2027         ifp = sc_if->msk_ifp;
 2028         if (device_is_attached(dev)) {
 2029                 /* XXX */
 2030                 sc_if->msk_flags |= MSK_FLAG_DETACH;
 2031                 msk_stop(sc_if);
 2032                 /* Can't hold locks while calling detach. */
 2033                 MSK_IF_UNLOCK(sc_if);
 2034                 callout_drain(&sc_if->msk_tick_ch);
 2035                 if (ifp)
 2036                         ether_ifdetach(ifp);
 2037                 MSK_IF_LOCK(sc_if);
 2038         }
 2039 
 2040         /*
 2041          * We're generally called from mskc_detach() which is using
 2042          * device_delete_child() to get to here. It's already trashed
 2043          * miibus for us, so don't do it here or we'll panic.
 2044          *
 2045          * if (sc_if->msk_miibus != NULL) {
 2046          *      device_delete_child(dev, sc_if->msk_miibus);
 2047          *      sc_if->msk_miibus = NULL;
 2048          * }
 2049          */
 2050 
 2051         msk_rx_dma_jfree(sc_if);
 2052         msk_txrx_dma_free(sc_if);
 2053         bus_generic_detach(dev);
 2054 
 2055         sc = sc_if->msk_softc;
 2056         sc->msk_if[sc_if->msk_port] = NULL;
 2057         MSK_IF_UNLOCK(sc_if);
 2058         if (ifp)
 2059                 if_free(ifp);
 2060 
 2061         return (0);
 2062 }
 2063 
 2064 static int
 2065 mskc_detach(device_t dev)
 2066 {
 2067         struct msk_softc *sc;
 2068 
 2069         sc = device_get_softc(dev);
 2070         KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
 2071 
 2072         if (device_is_alive(dev)) {
 2073                 if (sc->msk_devs[MSK_PORT_A] != NULL) {
 2074                         free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
 2075                             M_DEVBUF);
 2076                         device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
 2077                 }
 2078                 if (sc->msk_devs[MSK_PORT_B] != NULL) {
 2079                         free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
 2080                             M_DEVBUF);
 2081                         device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
 2082                 }
 2083                 bus_generic_detach(dev);
 2084         }
 2085 
 2086         /* Disable all interrupts. */
 2087         CSR_WRITE_4(sc, B0_IMSK, 0);
 2088         CSR_READ_4(sc, B0_IMSK);
 2089         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 2090         CSR_READ_4(sc, B0_HWE_IMSK);
 2091 
 2092         /* LED Off. */
 2093         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
 2094 
  2095         /* Put the hardware into reset. */
 2096         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 2097 
 2098         msk_status_dma_free(sc);
 2099 
 2100         if (sc->msk_intrhand) {
 2101                 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
 2102                 sc->msk_intrhand = NULL;
 2103         }
 2104         bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
 2105         if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
 2106                 pci_release_msi(dev);
 2107         bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
 2108         mtx_destroy(&sc->msk_mtx);
 2109 
 2110         return (0);
 2111 }
 2112 
 2113 static bus_dma_tag_t
 2114 mskc_get_dma_tag(device_t bus, device_t child __unused)
 2115 {
 2116 
 2117         return (bus_get_dma_tag(bus));
 2118 }
 2119 
 2120 struct msk_dmamap_arg {
 2121         bus_addr_t      msk_busaddr;
 2122 };
 2123 
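       /*
        * Generic busdma load callback: on success it records the bus
        * address of the single DMA segment so the caller can retrieve the
        * ring's physical address from its msk_dmamap_arg.
        */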
 2124 static void
 2125 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 2126 {
 2127         struct msk_dmamap_arg *ctx;
 2128 
 2129         if (error != 0)
 2130                 return;
 2131         ctx = arg;
 2132         ctx->msk_busaddr = segs[0].ds_addr;
 2133 }
 2134 
 2135 /* Create status DMA region. */
 2136 static int
 2137 msk_status_dma_alloc(struct msk_softc *sc)
 2138 {
 2139         struct msk_dmamap_arg ctx;
 2140         bus_size_t stat_sz;
 2141         int count, error;
 2142 
  2143         /*
  2144          * The controller seems to require that the number of status LE
  2145          * entries be a power of 2, with a maximum of 4096 entries.
  2146          * For dual-port controllers, the number of status LE entries
  2147          * should be large enough to hold status updates for both
  2148          * ports.
  2149          */
 2150         count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT;
 2151         count = imin(4096, roundup2(count, 1024));
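               /*
                * For example, assuming MSK_RX_RING_CNT = 512 and
                * MSK_TX_RING_CNT = 384, count = 3 * 512 + 384 = 1920, which
                * roundup2() raises to 2048 and imin() leaves at 2048.
                */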
 2152         sc->msk_stat_count = count;
 2153         stat_sz = count * sizeof(struct msk_stat_desc);
 2154         error = bus_dma_tag_create(
 2155                     bus_get_dma_tag(sc->msk_dev),       /* parent */
 2156                     MSK_STAT_ALIGN, 0,          /* alignment, boundary */
 2157                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2158                     BUS_SPACE_MAXADDR,          /* highaddr */
 2159                     NULL, NULL,                 /* filter, filterarg */
 2160                     stat_sz,                    /* maxsize */
 2161                     1,                          /* nsegments */
 2162                     stat_sz,                    /* maxsegsize */
 2163                     0,                          /* flags */
 2164                     NULL, NULL,                 /* lockfunc, lockarg */
 2165                     &sc->msk_stat_tag);
 2166         if (error != 0) {
 2167                 device_printf(sc->msk_dev,
 2168                     "failed to create status DMA tag\n");
 2169                 return (error);
 2170         }
 2171 
 2172         /* Allocate DMA'able memory and load the DMA map for status ring. */
 2173         error = bus_dmamem_alloc(sc->msk_stat_tag,
 2174             (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
 2175             BUS_DMA_ZERO, &sc->msk_stat_map);
 2176         if (error != 0) {
 2177                 device_printf(sc->msk_dev,
 2178                     "failed to allocate DMA'able memory for status ring\n");
 2179                 return (error);
 2180         }
 2181 
 2182         ctx.msk_busaddr = 0;
 2183         error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
 2184             sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2185         if (error != 0) {
 2186                 device_printf(sc->msk_dev,
 2187                     "failed to load DMA'able memory for status ring\n");
 2188                 return (error);
 2189         }
 2190         sc->msk_stat_ring_paddr = ctx.msk_busaddr;
 2191 
 2192         return (0);
 2193 }
 2194 
 2195 static void
 2196 msk_status_dma_free(struct msk_softc *sc)
 2197 {
 2198 
 2199         /* Destroy status block. */
 2200         if (sc->msk_stat_tag) {
 2201                 if (sc->msk_stat_ring_paddr) {
 2202                         bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
 2203                         sc->msk_stat_ring_paddr = 0;
 2204                 }
 2205                 if (sc->msk_stat_ring) {
 2206                         bus_dmamem_free(sc->msk_stat_tag,
 2207                             sc->msk_stat_ring, sc->msk_stat_map);
 2208                         sc->msk_stat_ring = NULL;
 2209                 }
 2210                 bus_dma_tag_destroy(sc->msk_stat_tag);
 2211                 sc->msk_stat_tag = NULL;
 2212         }
 2213 }
 2214 
 2215 static int
 2216 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
 2217 {
 2218         struct msk_dmamap_arg ctx;
 2219         struct msk_txdesc *txd;
 2220         struct msk_rxdesc *rxd;
 2221         bus_size_t rxalign;
 2222         int error, i;
 2223 
 2224         /* Create parent DMA tag. */
 2225         error = bus_dma_tag_create(
 2226                     bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
 2227                     1, 0,                       /* alignment, boundary */
 2228                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2229                     BUS_SPACE_MAXADDR,          /* highaddr */
 2230                     NULL, NULL,                 /* filter, filterarg */
 2231                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 2232                     0,                          /* nsegments */
 2233                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 2234                     0,                          /* flags */
 2235                     NULL, NULL,                 /* lockfunc, lockarg */
 2236                     &sc_if->msk_cdata.msk_parent_tag);
 2237         if (error != 0) {
 2238                 device_printf(sc_if->msk_if_dev,
 2239                     "failed to create parent DMA tag\n");
 2240                 goto fail;
 2241         }
 2242         /* Create tag for Tx ring. */
 2243         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2244                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2245                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2246                     BUS_SPACE_MAXADDR,          /* highaddr */
 2247                     NULL, NULL,                 /* filter, filterarg */
 2248                     MSK_TX_RING_SZ,             /* maxsize */
 2249                     1,                          /* nsegments */
 2250                     MSK_TX_RING_SZ,             /* maxsegsize */
 2251                     0,                          /* flags */
 2252                     NULL, NULL,                 /* lockfunc, lockarg */
 2253                     &sc_if->msk_cdata.msk_tx_ring_tag);
 2254         if (error != 0) {
 2255                 device_printf(sc_if->msk_if_dev,
 2256                     "failed to create Tx ring DMA tag\n");
 2257                 goto fail;
 2258         }
 2259 
 2260         /* Create tag for Rx ring. */
 2261         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2262                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2263                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2264                     BUS_SPACE_MAXADDR,          /* highaddr */
 2265                     NULL, NULL,                 /* filter, filterarg */
 2266                     MSK_RX_RING_SZ,             /* maxsize */
 2267                     1,                          /* nsegments */
 2268                     MSK_RX_RING_SZ,             /* maxsegsize */
 2269                     0,                          /* flags */
 2270                     NULL, NULL,                 /* lockfunc, lockarg */
 2271                     &sc_if->msk_cdata.msk_rx_ring_tag);
 2272         if (error != 0) {
 2273                 device_printf(sc_if->msk_if_dev,
 2274                     "failed to create Rx ring DMA tag\n");
 2275                 goto fail;
 2276         }
 2277 
 2278         /* Create tag for Tx buffers. */
 2279         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2280                     1, 0,                       /* alignment, boundary */
 2281                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2282                     BUS_SPACE_MAXADDR,          /* highaddr */
 2283                     NULL, NULL,                 /* filter, filterarg */
 2284                     MSK_TSO_MAXSIZE,            /* maxsize */
 2285                     MSK_MAXTXSEGS,              /* nsegments */
 2286                     MSK_TSO_MAXSGSIZE,          /* maxsegsize */
 2287                     0,                          /* flags */
 2288                     NULL, NULL,                 /* lockfunc, lockarg */
 2289                     &sc_if->msk_cdata.msk_tx_tag);
 2290         if (error != 0) {
 2291                 device_printf(sc_if->msk_if_dev,
 2292                     "failed to create Tx DMA tag\n");
 2293                 goto fail;
 2294         }
 2295 
 2296         rxalign = 1;
  2297         /*
  2298          * Work around a hardware hang which seems to happen when the Rx
  2299          * buffer is not aligned on a multiple of the FIFO word (8 bytes).
  2300          */
 2301         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 2302                 rxalign = MSK_RX_BUF_ALIGN;
 2303         /* Create tag for Rx buffers. */
 2304         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2305                     rxalign, 0,                 /* alignment, boundary */
 2306                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2307                     BUS_SPACE_MAXADDR,          /* highaddr */
 2308                     NULL, NULL,                 /* filter, filterarg */
 2309                     MCLBYTES,                   /* maxsize */
 2310                     1,                          /* nsegments */
 2311                     MCLBYTES,                   /* maxsegsize */
 2312                     0,                          /* flags */
 2313                     NULL, NULL,                 /* lockfunc, lockarg */
 2314                     &sc_if->msk_cdata.msk_rx_tag);
 2315         if (error != 0) {
 2316                 device_printf(sc_if->msk_if_dev,
 2317                     "failed to create Rx DMA tag\n");
 2318                 goto fail;
 2319         }
 2320 
 2321         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
 2322         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
 2323             (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
 2324             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
 2325         if (error != 0) {
 2326                 device_printf(sc_if->msk_if_dev,
 2327                     "failed to allocate DMA'able memory for Tx ring\n");
 2328                 goto fail;
 2329         }
 2330 
 2331         ctx.msk_busaddr = 0;
 2332         error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
 2333             sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
 2334             MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2335         if (error != 0) {
 2336                 device_printf(sc_if->msk_if_dev,
 2337                     "failed to load DMA'able memory for Tx ring\n");
 2338                 goto fail;
 2339         }
 2340         sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
 2341 
 2342         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
 2343         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
 2344             (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
 2345             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
 2346         if (error != 0) {
 2347                 device_printf(sc_if->msk_if_dev,
 2348                     "failed to allocate DMA'able memory for Rx ring\n");
 2349                 goto fail;
 2350         }
 2351 
 2352         ctx.msk_busaddr = 0;
 2353         error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
 2354             sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
 2355             MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2356         if (error != 0) {
 2357                 device_printf(sc_if->msk_if_dev,
 2358                     "failed to load DMA'able memory for Rx ring\n");
 2359                 goto fail;
 2360         }
 2361         sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;
 2362 
 2363         /* Create DMA maps for Tx buffers. */
 2364         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2365                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 2366                 txd->tx_m = NULL;
 2367                 txd->tx_dmamap = NULL;
 2368                 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
 2369                     &txd->tx_dmamap);
 2370                 if (error != 0) {
 2371                         device_printf(sc_if->msk_if_dev,
 2372                             "failed to create Tx dmamap\n");
 2373                         goto fail;
 2374                 }
 2375         }
 2376         /* Create DMA maps for Rx buffers. */
 2377         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
 2378             &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
 2379                 device_printf(sc_if->msk_if_dev,
 2380                     "failed to create spare Rx dmamap\n");
 2381                 goto fail;
 2382         }
 2383         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2384                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2385                 rxd->rx_m = NULL;
 2386                 rxd->rx_dmamap = NULL;
 2387                 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
 2388                     &rxd->rx_dmamap);
 2389                 if (error != 0) {
 2390                         device_printf(sc_if->msk_if_dev,
 2391                             "failed to create Rx dmamap\n");
 2392                         goto fail;
 2393                 }
 2394         }
 2395 
 2396 fail:
 2397         return (error);
 2398 }
 2399 
 2400 static int
 2401 msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
 2402 {
 2403         struct msk_dmamap_arg ctx;
 2404         struct msk_rxdesc *jrxd;
 2405         bus_size_t rxalign;
 2406         int error, i;
 2407 
 2408         if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
 2409                 sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
 2410                 device_printf(sc_if->msk_if_dev,
 2411                     "disabling jumbo frame support\n");
 2412                 return (0);
 2413         }
 2414         /* Create tag for jumbo Rx ring. */
 2415         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2416                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2417                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2418                     BUS_SPACE_MAXADDR,          /* highaddr */
 2419                     NULL, NULL,                 /* filter, filterarg */
 2420                     MSK_JUMBO_RX_RING_SZ,       /* maxsize */
 2421                     1,                          /* nsegments */
 2422                     MSK_JUMBO_RX_RING_SZ,       /* maxsegsize */
 2423                     0,                          /* flags */
 2424                     NULL, NULL,                 /* lockfunc, lockarg */
 2425                     &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2426         if (error != 0) {
 2427                 device_printf(sc_if->msk_if_dev,
 2428                     "failed to create jumbo Rx ring DMA tag\n");
 2429                 goto jumbo_fail;
 2430         }
 2431 
 2432         rxalign = 1;
  2433         /*
  2434          * Work around a hardware hang which seems to happen when the Rx
  2435          * buffer is not aligned on a multiple of the FIFO word (8 bytes).
  2436          */
 2437         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 2438                 rxalign = MSK_RX_BUF_ALIGN;
 2439         /* Create tag for jumbo Rx buffers. */
 2440         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2441                     rxalign, 0,                 /* alignment, boundary */
 2442                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2443                     BUS_SPACE_MAXADDR,          /* highaddr */
 2444                     NULL, NULL,                 /* filter, filterarg */
 2445                     MJUM9BYTES,                 /* maxsize */
 2446                     1,                          /* nsegments */
 2447                     MJUM9BYTES,                 /* maxsegsize */
 2448                     0,                          /* flags */
 2449                     NULL, NULL,                 /* lockfunc, lockarg */
 2450                     &sc_if->msk_cdata.msk_jumbo_rx_tag);
 2451         if (error != 0) {
 2452                 device_printf(sc_if->msk_if_dev,
 2453                     "failed to create jumbo Rx DMA tag\n");
 2454                 goto jumbo_fail;
 2455         }
 2456 
 2457         /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
 2458         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2459             (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
 2460             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
 2461             &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2462         if (error != 0) {
 2463                 device_printf(sc_if->msk_if_dev,
 2464                     "failed to allocate DMA'able memory for jumbo Rx ring\n");
 2465                 goto jumbo_fail;
 2466         }
 2467 
 2468         ctx.msk_busaddr = 0;
 2469         error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2470             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 2471             sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
 2472             msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 2473         if (error != 0) {
 2474                 device_printf(sc_if->msk_if_dev,
 2475                     "failed to load DMA'able memory for jumbo Rx ring\n");
 2476                 goto jumbo_fail;
 2477         }
 2478         sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
 2479 
 2480         /* Create DMA maps for jumbo Rx buffers. */
 2481         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2482             &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
 2483                 device_printf(sc_if->msk_if_dev,
 2484                     "failed to create spare jumbo Rx dmamap\n");
 2485                 goto jumbo_fail;
 2486         }
 2487         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2488                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2489                 jrxd->rx_m = NULL;
 2490                 jrxd->rx_dmamap = NULL;
 2491                 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2492                     &jrxd->rx_dmamap);
 2493                 if (error != 0) {
 2494                         device_printf(sc_if->msk_if_dev,
 2495                             "failed to create jumbo Rx dmamap\n");
 2496                         goto jumbo_fail;
 2497                 }
 2498         }
 2499 
 2500         return (0);
 2501 
 2502 jumbo_fail:
 2503         msk_rx_dma_jfree(sc_if);
 2504         device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
 2505             "due to resource shortage\n");
 2506         sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
 2507         return (error);
 2508 }
 2509 
 2510 static void
 2511 msk_txrx_dma_free(struct msk_if_softc *sc_if)
 2512 {
 2513         struct msk_txdesc *txd;
 2514         struct msk_rxdesc *rxd;
 2515         int i;
 2516 
 2517         /* Tx ring. */
 2518         if (sc_if->msk_cdata.msk_tx_ring_tag) {
 2519                 if (sc_if->msk_rdata.msk_tx_ring_paddr)
 2520                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
 2521                             sc_if->msk_cdata.msk_tx_ring_map);
 2522                 if (sc_if->msk_rdata.msk_tx_ring)
 2523                         bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
 2524                             sc_if->msk_rdata.msk_tx_ring,
 2525                             sc_if->msk_cdata.msk_tx_ring_map);
 2526                 sc_if->msk_rdata.msk_tx_ring = NULL;
 2527                 sc_if->msk_rdata.msk_tx_ring_paddr = 0;
 2528                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
 2529                 sc_if->msk_cdata.msk_tx_ring_tag = NULL;
 2530         }
 2531         /* Rx ring. */
 2532         if (sc_if->msk_cdata.msk_rx_ring_tag) {
 2533                 if (sc_if->msk_rdata.msk_rx_ring_paddr)
 2534                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag,
 2535                             sc_if->msk_cdata.msk_rx_ring_map);
 2536                 if (sc_if->msk_rdata.msk_rx_ring)
 2537                         bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag,
 2538                             sc_if->msk_rdata.msk_rx_ring,
 2539                             sc_if->msk_cdata.msk_rx_ring_map);
 2540                 sc_if->msk_rdata.msk_rx_ring = NULL;
 2541                 sc_if->msk_rdata.msk_rx_ring_paddr = 0;
 2542                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag);
 2543                 sc_if->msk_cdata.msk_rx_ring_tag = NULL;
 2544         }
 2545         /* Tx buffers. */
 2546         if (sc_if->msk_cdata.msk_tx_tag) {
 2547                 for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2548                         txd = &sc_if->msk_cdata.msk_txdesc[i];
 2549                         if (txd->tx_dmamap) {
 2550                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
 2551                                     txd->tx_dmamap);
 2552                                 txd->tx_dmamap = NULL;
 2553                         }
 2554                 }
 2555                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
 2556                 sc_if->msk_cdata.msk_tx_tag = NULL;
 2557         }
 2558         /* Rx buffers. */
 2559         if (sc_if->msk_cdata.msk_rx_tag) {
 2560                 for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2561                         rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2562                         if (rxd->rx_dmamap) {
 2563                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2564                                     rxd->rx_dmamap);
 2565                                 rxd->rx_dmamap = NULL;
 2566                         }
 2567                 }
 2568                 if (sc_if->msk_cdata.msk_rx_sparemap) {
 2569                         bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2570                             sc_if->msk_cdata.msk_rx_sparemap);
 2571                         sc_if->msk_cdata.msk_rx_sparemap = 0;
 2572                 }
 2573                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
 2574                 sc_if->msk_cdata.msk_rx_tag = NULL;
 2575         }
 2576         if (sc_if->msk_cdata.msk_parent_tag) {
 2577                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
 2578                 sc_if->msk_cdata.msk_parent_tag = NULL;
 2579         }
 2580 }
 2581 
 2582 static void
 2583 msk_rx_dma_jfree(struct msk_if_softc *sc_if)
 2584 {
 2585         struct msk_rxdesc *jrxd;
 2586         int i;
 2587 
 2588         /* Jumbo Rx ring. */
 2589         if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
 2590                 if (sc_if->msk_rdata.msk_jumbo_rx_ring_paddr)
 2591                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2592                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2593                 if (sc_if->msk_rdata.msk_jumbo_rx_ring)
 2594                         bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2595                             sc_if->msk_rdata.msk_jumbo_rx_ring,
 2596                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2597                 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
 2598                 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = 0;
 2599                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2600                 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
 2601         }
 2602         /* Jumbo Rx buffers. */
 2603         if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
 2604                 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2605                         jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2606                         if (jrxd->rx_dmamap) {
 2607                                 bus_dmamap_destroy(
 2608                                     sc_if->msk_cdata.msk_jumbo_rx_tag,
 2609                                     jrxd->rx_dmamap);
 2610                                 jrxd->rx_dmamap = NULL;
 2611                         }
 2612                 }
 2613                 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
 2614                         bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
 2615                             sc_if->msk_cdata.msk_jumbo_rx_sparemap);
 2616                         sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
 2617                 }
 2618                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
 2619                 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
 2620         }
 2621 }
 2622 
 2623 static int
 2624 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
 2625 {
 2626         struct msk_txdesc *txd, *txd_last;
 2627         struct msk_tx_desc *tx_le;
 2628         struct mbuf *m;
 2629         bus_dmamap_t map;
 2630         bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
 2631         uint32_t control, csum, prod, si;
 2632         uint16_t offset, tcp_offset, tso_mtu;
 2633         int error, i, nseg, tso;
 2634 
 2635         MSK_IF_LOCK_ASSERT(sc_if);
 2636 
 2637         tcp_offset = offset = 0;
 2638         m = *m_head;
 2639         if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
 2640             (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
 2641             ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 2642             (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
 2643                 /*
 2644                  * Since the mbuf carries no protocol-specific structure
 2645                  * information, we have to inspect the protocol headers here
 2646                  * to set up TSO and checksum offload. It is unclear why
 2647                  * Marvell made such a design decision, as other GigE
 2648                  * controllers normally take care of these chores in
 2649                  * hardware. However, the TSO performance of the Yukon II is
 2650                  * good enough to make it worth implementing.
 2651                  */
 2652                 struct ether_header *eh;
 2653                 struct ip *ip;
 2654                 struct tcphdr *tcp;
 2655 
 2656                 if (M_WRITABLE(m) == 0) {
 2657                         /* Get a writable copy. */
 2658                         m = m_dup(*m_head, M_NOWAIT);
 2659                         m_freem(*m_head);
 2660                         if (m == NULL) {
 2661                                 *m_head = NULL;
 2662                                 return (ENOBUFS);
 2663                         }
 2664                         *m_head = m;
 2665                 }
 2666 
 2667                 offset = sizeof(struct ether_header);
 2668                 m = m_pullup(m, offset);
 2669                 if (m == NULL) {
 2670                         *m_head = NULL;
 2671                         return (ENOBUFS);
 2672                 }
 2673                 eh = mtod(m, struct ether_header *);
 2674                 /* Check if hardware VLAN insertion is off. */
 2675                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 2676                         offset = sizeof(struct ether_vlan_header);
 2677                         m = m_pullup(m, offset);
 2678                         if (m == NULL) {
 2679                                 *m_head = NULL;
 2680                                 return (ENOBUFS);
 2681                         }
 2682                 }
 2683                 m = m_pullup(m, offset + sizeof(struct ip));
 2684                 if (m == NULL) {
 2685                         *m_head = NULL;
 2686                         return (ENOBUFS);
 2687                 }
 2688                 ip = (struct ip *)(mtod(m, char *) + offset);
 2689                 offset += (ip->ip_hl << 2);
 2690                 tcp_offset = offset;
 2691                 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2692                         m = m_pullup(m, offset + sizeof(struct tcphdr));
 2693                         if (m == NULL) {
 2694                                 *m_head = NULL;
 2695                                 return (ENOBUFS);
 2696                         }
 2697                         tcp = (struct tcphdr *)(mtod(m, char *) + offset);
 2698                         offset += (tcp->th_off << 2);
 2699                 } else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
 2700                     (m->m_pkthdr.len < MSK_MIN_FRAMELEN) &&
 2701                     (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
 2702                         /*
 2703                          * The Yukon II appears to have a Tx checksum offload
 2704                          * bug for TCP packets that are shorter than 60 bytes
 2705                          * (e.g. TCP window probe packets, pure ACK packets).
 2706                          * Common workarounds such as padding the frame with
 2707                          * zeros up to the minimum Ethernet frame size did
 2708                          * not work at all.
 2709                          * Instead of disabling checksum offload completely,
 2710                          * we resort to a software checksum routine when we
 2711                          * encounter short TCP frames.
 2712                          * Short UDP packets appear to be handled correctly
 2713                          * by the Yukon II. This bug is also assumed not to
 2714                          * occur on controllers that use the newer descriptor
 2715                          * format or automatic Tx checksum calculation.
 2716                          */
 2717                         m = m_pullup(m, offset + sizeof(struct tcphdr));
 2718                         if (m == NULL) {
 2719                                 *m_head = NULL;
 2720                                 return (ENOBUFS);
 2721                         }
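                              /*
                               * Compute the TCP checksum in software, starting
                               * at the TCP header, and store the result at the
                               * offset the stack recorded in csum_data.
                               */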
 2722                         *(uint16_t *)(m->m_data + offset +
 2723                             m->m_pkthdr.csum_data) = in_cksum_skip(m,
 2724                             m->m_pkthdr.len, offset);
 2725                         m->m_pkthdr.csum_flags &= ~CSUM_TCP;
 2726                 }
 2727                 *m_head = m;
 2728         }
 2729 
 2730         prod = sc_if->msk_cdata.msk_tx_prod;
 2731         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2732         txd_last = txd;
 2733         map = txd->tx_dmamap;
 2734         error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
 2735             *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
 2736         if (error == EFBIG) {
 2737                 m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS);
 2738                 if (m == NULL) {
 2739                         m_freem(*m_head);
 2740                         *m_head = NULL;
 2741                         return (ENOBUFS);
 2742                 }
 2743                 *m_head = m;
 2744                 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag,
 2745                     map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
 2746                 if (error != 0) {
 2747                         m_freem(*m_head);
 2748                         *m_head = NULL;
 2749                         return (error);
 2750                 }
 2751         } else if (error != 0)
 2752                 return (error);
 2753         if (nseg == 0) {
 2754                 m_freem(*m_head);
 2755                 *m_head = NULL;
 2756                 return (EIO);
 2757         }
 2758 
 2759         /* Check number of available descriptors. */
 2760         if (sc_if->msk_cdata.msk_tx_cnt + nseg >=
 2761             (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
 2762                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
 2763                 return (ENOBUFS);
 2764         }
 2765 
 2766         control = 0;
 2767         tso = 0;
 2768         tx_le = NULL;
 2769 
 2770         /* Check TSO support. */
 2771         if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
 2772                 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
 2773                         tso_mtu = m->m_pkthdr.tso_segsz;
 2774                 else
 2775                         tso_mtu = offset + m->m_pkthdr.tso_segsz;
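                      /*
                       * A new MSS / large-send length LE is needed only when
                       * the segment size changes; the chip apparently keeps
                       * using the value programmed by the previous LE.
                       */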
 2776                 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) {
 2777                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2778                         tx_le->msk_addr = htole32(tso_mtu);
 2779                         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0)
 2780                                 tx_le->msk_control = htole32(OP_MSS | HW_OWNER);
 2781                         else
 2782                                 tx_le->msk_control =
 2783                                     htole32(OP_LRGLEN | HW_OWNER);
 2784                         sc_if->msk_cdata.msk_tx_cnt++;
 2785                         MSK_INC(prod, MSK_TX_RING_CNT);
 2786                         sc_if->msk_cdata.msk_tso_mtu = tso_mtu;
 2787                 }
 2788                 tso++;
 2789         }
 2790         /* Check if we have a VLAN tag to insert. */
 2791         if ((m->m_flags & M_VLANTAG) != 0) {
 2792                 if (tx_le == NULL) {
 2793                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2794                         tx_le->msk_addr = htole32(0);
 2795                         tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
 2796                             htons(m->m_pkthdr.ether_vtag));
 2797                         sc_if->msk_cdata.msk_tx_cnt++;
 2798                         MSK_INC(prod, MSK_TX_RING_CNT);
 2799                 } else {
 2800                         tx_le->msk_control |= htole32(OP_VLAN |
 2801                             htons(m->m_pkthdr.ether_vtag));
 2802                 }
 2803                 control |= INS_VLAN;
 2804         }
 2805         /* Check if we have to handle checksum offload. */
 2806         if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) {
 2807                 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0)
 2808                         control |= CALSUM;
 2809                 else {
 2810                         control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
 2811                         if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 2812                                 control |= UDPTCP;
 2813                         /* Checksum write position. */
 2814                         csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff;
 2815                         /* Checksum start position. */
 2816                         csum |= (uint32_t)tcp_offset << 16;
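                              /*
                               * Likewise, emit a checksum-parameter LE only
                               * when the start/write positions differ from
                               * the ones programmed last time.
                               */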
 2817                         if (csum != sc_if->msk_cdata.msk_last_csum) {
 2818                                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2819                                 tx_le->msk_addr = htole32(csum);
 2820                                 tx_le->msk_control = htole32(1 << 16 |
 2821                                     (OP_TCPLISW | HW_OWNER));
 2822                                 sc_if->msk_cdata.msk_tx_cnt++;
 2823                                 MSK_INC(prod, MSK_TX_RING_CNT);
 2824                                 sc_if->msk_cdata.msk_last_csum = csum;
 2825                         }
 2826                 }
 2827         }
 2828 
 2829 #ifdef MSK_64BIT_DMA
 2830         if (MSK_ADDR_HI(txsegs[0].ds_addr) !=
 2831             sc_if->msk_cdata.msk_tx_high_addr) {
 2832                 sc_if->msk_cdata.msk_tx_high_addr =
 2833                     MSK_ADDR_HI(txsegs[0].ds_addr);
 2834                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2835                 tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr));
 2836                 tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
 2837                 sc_if->msk_cdata.msk_tx_cnt++;
 2838                 MSK_INC(prod, MSK_TX_RING_CNT);
 2839         }
 2840 #endif
 2841         si = prod;
 2842         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2843         tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
 2844         if (tso == 0)
 2845                 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2846                     OP_PACKET);
 2847         else
 2848                 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2849                     OP_LARGESEND);
 2850         sc_if->msk_cdata.msk_tx_cnt++;
 2851         MSK_INC(prod, MSK_TX_RING_CNT);
 2852 
 2853         for (i = 1; i < nseg; i++) {
 2854                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2855 #ifdef MSK_64BIT_DMA
 2856                 if (MSK_ADDR_HI(txsegs[i].ds_addr) !=
 2857                     sc_if->msk_cdata.msk_tx_high_addr) {
 2858                         sc_if->msk_cdata.msk_tx_high_addr =
 2859                             MSK_ADDR_HI(txsegs[i].ds_addr);
 2860                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2861                         tx_le->msk_addr =
 2862                             htole32(MSK_ADDR_HI(txsegs[i].ds_addr));
 2863                         tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER);
 2864                         sc_if->msk_cdata.msk_tx_cnt++;
 2865                         MSK_INC(prod, MSK_TX_RING_CNT);
 2866                         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2867                 }
 2868 #endif
 2869                 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
 2870                 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
 2871                     OP_BUFFER | HW_OWNER);
 2872                 sc_if->msk_cdata.msk_tx_cnt++;
 2873                 MSK_INC(prod, MSK_TX_RING_CNT);
 2874         }
 2875         /* Update producer index. */
 2876         sc_if->msk_cdata.msk_tx_prod = prod;
 2877 
 2878         /* Set EOP on the last descriptor. */
 2879         prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
 2880         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2881         tx_le->msk_control |= htole32(EOP);
 2882 
 2883         /* Hand ownership of the first descriptor to the hardware. */
 2884         tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
 2885         tx_le->msk_control |= htole32(HW_OWNER);
 2886 
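              /*
               * Swap the DMA maps of the first and last descriptors so the
               * map that was actually loaded for this mbuf stays with the
               * EOP descriptor that records the mbuf; msk_txeof() will sync
               * and unload that map when the frame completes.
               */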
 2887         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2888         map = txd_last->tx_dmamap;
 2889         txd_last->tx_dmamap = txd->tx_dmamap;
 2890         txd->tx_dmamap = map;
 2891         txd->tx_m = m;
 2892 
 2893         /* Sync descriptors. */
 2894         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
 2895         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
 2896             sc_if->msk_cdata.msk_tx_ring_map,
 2897             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2898 
 2899         return (0);
 2900 }
 2901 
 2902 static void
 2903 msk_start(struct ifnet *ifp)
 2904 {
 2905         struct msk_if_softc *sc_if;
 2906 
 2907         sc_if = ifp->if_softc;
 2908         MSK_IF_LOCK(sc_if);
 2909         msk_start_locked(ifp);
 2910         MSK_IF_UNLOCK(sc_if);
 2911 }
 2912 
 2913 static void
 2914 msk_start_locked(struct ifnet *ifp)
 2915 {
 2916         struct msk_if_softc *sc_if;
 2917         struct mbuf *m_head;
 2918         int enq;
 2919 
 2920         sc_if = ifp->if_softc;
 2921         MSK_IF_LOCK_ASSERT(sc_if);
 2922 
 2923         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 2924             IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0)
 2925                 return;
 2926 
 2927         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 2928             sc_if->msk_cdata.msk_tx_cnt <
 2929             (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
 2930                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 2931                 if (m_head == NULL)
 2932                         break;
 2933                 /*
 2934                  * Pack the data into the transmit ring. If we
 2935                  * don't have room, set the OACTIVE flag and wait
 2936                  * for the NIC to drain the ring.
 2937                  */
 2938                 if (msk_encap(sc_if, &m_head) != 0) {
 2939                         if (m_head == NULL)
 2940                                 break;
 2941                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 2942                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 2943                         break;
 2944                 }
 2945 
 2946                 enq++;
 2947                 /*
 2948                  * If there is a BPF listener, bounce a copy of this frame
 2949                  * to it.
 2950                  */
 2951                 ETHER_BPF_MTAP(ifp, m_head);
 2952         }
 2953 
 2954         if (enq > 0) {
 2955                 /* Transmit */
 2956                 CSR_WRITE_2(sc_if->msk_softc,
 2957                     Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
 2958                     sc_if->msk_cdata.msk_tx_prod);
 2959 
 2960                 /* Set a timeout in case the chip goes out to lunch. */
 2961                 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT;
 2962         }
 2963 }
 2964 
 2965 static void
 2966 msk_watchdog(struct msk_if_softc *sc_if)
 2967 {
 2968         struct ifnet *ifp;
 2969 
 2970         MSK_IF_LOCK_ASSERT(sc_if);
 2971 
 2972         if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer)
 2973                 return;
 2974         ifp = sc_if->msk_ifp;
 2975         if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
 2976                 if (bootverbose)
 2977                         if_printf(sc_if->msk_ifp, "watchdog timeout "
 2978                            "(missed link)\n");
 2979                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2980                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2981                 msk_init_locked(sc_if);
 2982                 return;
 2983         }
 2984 
 2985         if_printf(ifp, "watchdog timeout\n");
 2986         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 2987         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
 2988         msk_init_locked(sc_if);
 2989         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 2990                 msk_start_locked(ifp);
 2991 }
 2992 
 2993 static int
 2994 mskc_shutdown(device_t dev)
 2995 {
 2996         struct msk_softc *sc;
 2997         int i;
 2998 
 2999         sc = device_get_softc(dev);
 3000         MSK_LOCK(sc);
 3001         for (i = 0; i < sc->msk_num_port; i++) {
 3002                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3003                     ((sc->msk_if[i]->msk_ifp->if_drv_flags &
 3004                     IFF_DRV_RUNNING) != 0))
 3005                         msk_stop(sc->msk_if[i]);
 3006         }
 3007         MSK_UNLOCK(sc);
 3008 
 3009         /* Put the hardware into reset. */
 3010         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 3011         return (0);
 3012 }
 3013 
 3014 static int
 3015 mskc_suspend(device_t dev)
 3016 {
 3017         struct msk_softc *sc;
 3018         int i;
 3019 
 3020         sc = device_get_softc(dev);
 3021 
 3022         MSK_LOCK(sc);
 3023 
 3024         for (i = 0; i < sc->msk_num_port; i++) {
 3025                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3026                     ((sc->msk_if[i]->msk_ifp->if_drv_flags &
 3027                     IFF_DRV_RUNNING) != 0))
 3028                         msk_stop(sc->msk_if[i]);
 3029         }
 3030 
 3031         /* Disable all interrupts. */
 3032         CSR_WRITE_4(sc, B0_IMSK, 0);
 3033         CSR_READ_4(sc, B0_IMSK);
 3034         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 3035         CSR_READ_4(sc, B0_HWE_IMSK);
 3036 
 3037         msk_phy_power(sc, MSK_PHY_POWERDOWN);
 3038 
 3039         /* Put the hardware into reset. */
 3040         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 3041         sc->msk_pflags |= MSK_FLAG_SUSPEND;
 3042 
 3043         MSK_UNLOCK(sc);
 3044 
 3045         return (0);
 3046 }
 3047 
 3048 static int
 3049 mskc_resume(device_t dev)
 3050 {
 3051         struct msk_softc *sc;
 3052         int i;
 3053 
 3054         sc = device_get_softc(dev);
 3055 
 3056         MSK_LOCK(sc);
 3057 
 3058         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
 3059         mskc_reset(sc);
 3060         for (i = 0; i < sc->msk_num_port; i++) {
 3061                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 3062                     ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
 3063                         sc->msk_if[i]->msk_ifp->if_drv_flags &=
 3064                             ~IFF_DRV_RUNNING;
 3065                         msk_init_locked(sc->msk_if[i]);
 3066                 }
 3067         }
 3068         sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
 3069 
 3070         MSK_UNLOCK(sc);
 3071 
 3072         return (0);
 3073 }
 3074 
 3075 #ifndef __NO_STRICT_ALIGNMENT
 3076 static __inline void
 3077 msk_fixup_rx(struct mbuf *m)
 3078 {
 3079         int i;
 3080         uint16_t *src, *dst;
 3081 
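              /*
               * Rx buffers on RAM-buffer based chips are aligned on an
               * 8-byte boundary (see the Rx DMA tag setup), which leaves the
               * IP header misaligned on strict-alignment machines. Copy the
               * frame back by MSK_RX_BUF_ALIGN - ETHER_ALIGN bytes, one
               * 16-bit word at a time, so the payload is properly aligned
               * again.
               */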
 3082         src = mtod(m, uint16_t *);
 3083         dst = src - 3;
 3084 
 3085         for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
 3086                 *dst++ = *src++;
 3087 
 3088         m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
 3089 }
 3090 #endif
 3091 
 3092 static __inline void
 3093 msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
 3094 {
 3095         struct ether_header *eh;
 3096         struct ip *ip;
 3097         struct udphdr *uh;
 3098         int32_t hlen, len, pktlen, temp32;
 3099         uint16_t csum, *opts;
 3100 
 3101         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
 3102                 if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
 3103                         m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 3104                         if ((control & CSS_IPV4_CSUM_OK) != 0)
 3105                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 3106                         if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
 3107                             (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
 3108                                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
 3109                                     CSUM_PSEUDO_HDR;
 3110                                 m->m_pkthdr.csum_data = 0xffff;
 3111                         }
 3112                 }
 3113                 return;
 3114         }
 3115         /*
 3116          * Marvell Yukon controllers that support OP_RXCHKS are known
 3117          * to have various Rx checksum offloading bugs. These
 3118          * controllers can be configured to compute a simple checksum
 3119          * at two different positions, so IP and TCP/UDP checksums can
 3120          * be computed at the same time. We intentionally have the
 3121          * controller compute the TCP/UDP checksum twice by specifying
 3122          * the same checksum start position and compare the results; a
 3123          * mismatch indicates that the hardware logic was wrong.
 3124          */
 3125         if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
 3126                 if (bootverbose)
 3127                         device_printf(sc_if->msk_if_dev,
 3128                             "Rx checksum value mismatch!\n");
 3129                 return;
 3130         }
 3131         pktlen = m->m_pkthdr.len;
 3132         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 3133                 return;
 3134         eh = mtod(m, struct ether_header *);
 3135         if (eh->ether_type != htons(ETHERTYPE_IP))
 3136                 return;
 3137         ip = (struct ip *)(eh + 1);
 3138         if (ip->ip_v != IPVERSION)
 3139                 return;
 3140 
 3141         hlen = ip->ip_hl << 2;
 3142         pktlen -= sizeof(struct ether_header);
 3143         if (hlen < sizeof(struct ip))
 3144                 return;
 3145         if (ntohs(ip->ip_len) < hlen)
 3146                 return;
 3147         if (ntohs(ip->ip_len) != pktlen)
 3148                 return;
 3149         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 3150                 return; /* can't handle fragmented packet. */
 3151 
 3152         switch (ip->ip_p) {
 3153         case IPPROTO_TCP:
 3154                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 3155                         return;
 3156                 break;
 3157         case IPPROTO_UDP:
 3158                 if (pktlen < (hlen + sizeof(struct udphdr)))
 3159                         return;
 3160                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 3161                 if (uh->uh_sum == 0)
 3162                         return; /* no checksum */
 3163                 break;
 3164         default:
 3165                 return;
 3166         }
 3167         csum = bswap16(sc_if->msk_csum & 0xFFFF);
 3168         /* Checksum fixup for IP options. */
 3169         len = hlen - sizeof(struct ip);
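              /*
               * The hardware sum apparently covers the IP option words as
               * well, so subtract each option word from the reported value
               * using one's-complement arithmetic (subtract, then fold the
               * borrow back into the low 16 bits).
               */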
 3170         if (len > 0) {
 3171                 opts = (uint16_t *)(ip + 1);
 3172                 for (; len > 0; len -= sizeof(uint16_t), opts++) {
 3173                         temp32 = csum - *opts;
 3174                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 3175                         csum = temp32 & 65535;
 3176                 }
 3177         }
 3178         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 3179         m->m_pkthdr.csum_data = csum;
 3180 }
 3181 
 3182 static void
 3183 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
 3184     int len)
 3185 {
 3186         struct mbuf *m;
 3187         struct ifnet *ifp;
 3188         struct msk_rxdesc *rxd;
 3189         int cons, rxlen;
 3190 
 3191         ifp = sc_if->msk_ifp;
 3192 
 3193         MSK_IF_LOCK_ASSERT(sc_if);
 3194 
 3195         cons = sc_if->msk_cdata.msk_rx_cons;
 3196         do {
 3197                 rxlen = status >> 16;
 3198                 if ((status & GMR_FS_VLAN) != 0 &&
 3199                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3200                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 3201                 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
 3202                         /*
 3203                          * For controllers that return a bogus status code,
 3204                          * just do a minimal check and let the upper stack
 3205                          * handle this frame.
 3206                          */
 3207                         if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
 3208                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 3209                                 msk_discard_rxbuf(sc_if, cons);
 3210                                 break;
 3211                         }
 3212                 } else if (len > sc_if->msk_framesize ||
 3213                     ((status & GMR_FS_ANY_ERR) != 0) ||
 3214                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 3215                         /* Don't count flow-control packets as errors. */
 3216                         if ((status & GMR_FS_GOOD_FC) == 0)
 3217                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 3218                         msk_discard_rxbuf(sc_if, cons);
 3219                         break;
 3220                 }
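                      /*
                       * With 64-bit DMA each Rx buffer consumes two LEs (an
                       * OP_ADDR64 LE followed by the buffer LE), so the
                       * software descriptor holding the mbuf lives one slot
                       * past the consumer index.
                       */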
 3221 #ifdef MSK_64BIT_DMA
 3222                 rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) %
 3223                     MSK_RX_RING_CNT];
 3224 #else
 3225                 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
 3226 #endif
 3227                 m = rxd->rx_m;
 3228                 if (msk_newbuf(sc_if, cons) != 0) {
 3229                         if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 3230                         /* Reuse old buffer. */
 3231                         msk_discard_rxbuf(sc_if, cons);
 3232                         break;
 3233                 }
 3234                 m->m_pkthdr.rcvif = ifp;
 3235                 m->m_pkthdr.len = m->m_len = len;
 3236 #ifndef __NO_STRICT_ALIGNMENT
 3237                 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 3238                         msk_fixup_rx(m);
 3239 #endif
 3240                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 3241                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 3242                         msk_rxcsum(sc_if, control, m);
 3243                 /* Check for VLAN tagged packets. */
 3244                 if ((status & GMR_FS_VLAN) != 0 &&
 3245                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 3246                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 3247                         m->m_flags |= M_VLANTAG;
 3248                 }
 3249                 MSK_IF_UNLOCK(sc_if);
 3250                 (*ifp->if_input)(ifp, m);
 3251                 MSK_IF_LOCK(sc_if);
 3252         } while (0);
 3253 
 3254         MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
 3255         MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
 3256 }
 3257 
 3258 static void
 3259 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
 3260     int len)
 3261 {
 3262         struct mbuf *m;
 3263         struct ifnet *ifp;
 3264         struct msk_rxdesc *jrxd;
 3265         int cons, rxlen;
 3266 
 3267         ifp = sc_if->msk_ifp;
 3268 
 3269         MSK_IF_LOCK_ASSERT(sc_if);
 3270 
 3271         cons = sc_if->msk_cdata.msk_rx_cons;
 3272         do {
 3273                 rxlen = status >> 16;
 3274                 if ((status & GMR_FS_VLAN) != 0 &&
 3275                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 3276                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 3277                 if (len > sc_if->msk_framesize ||
 3278                     ((status & GMR_FS_ANY_ERR) != 0) ||
 3279                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 3280                         /* Don't count flow-control packets as errors. */
 3281                         if ((status & GMR_FS_GOOD_FC) == 0)
 3282                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 3283                         msk_discard_jumbo_rxbuf(sc_if, cons);
 3284                         break;
 3285                 }
 3286 #ifdef MSK_64BIT_DMA
 3287                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) %
 3288                     MSK_JUMBO_RX_RING_CNT];
 3289 #else
 3290                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
 3291 #endif
 3292                 m = jrxd->rx_m;
 3293                 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
 3294                         if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
 3295                         /* Reuse old buffer. */
 3296                         msk_discard_jumbo_rxbuf(sc_if, cons);
 3297                         break;
 3298                 }
 3299                 m->m_pkthdr.rcvif = ifp;
 3300                 m->m_pkthdr.len = m->m_len = len;
 3301 #ifndef __NO_STRICT_ALIGNMENT
 3302                 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
 3303                         msk_fixup_rx(m);
 3304 #endif
 3305                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 3306                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 3307                         msk_rxcsum(sc_if, control, m);
 3308                 /* Check for VLAN tagged packets. */
 3309                 if ((status & GMR_FS_VLAN) != 0 &&
 3310                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 3311                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 3312                         m->m_flags |= M_VLANTAG;
 3313                 }
 3314                 MSK_IF_UNLOCK(sc_if);
 3315                 (*ifp->if_input)(ifp, m);
 3316                 MSK_IF_LOCK(sc_if);
 3317         } while (0);
 3318 
 3319         MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
 3320         MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
 3321 }
 3322 
 3323 static void
 3324 msk_txeof(struct msk_if_softc *sc_if, int idx)
 3325 {
 3326         struct msk_txdesc *txd;
 3327         struct msk_tx_desc *cur_tx;
 3328         struct ifnet *ifp;
 3329         uint32_t control;
 3330         int cons, prog;
 3331 
 3332         MSK_IF_LOCK_ASSERT(sc_if);
 3333 
 3334         ifp = sc_if->msk_ifp;
 3335 
 3336         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
 3337             sc_if->msk_cdata.msk_tx_ring_map,
 3338             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3339         /*
 3340          * Go through our tx ring and free mbufs for those
 3341          * frames that have been sent.
 3342          */
 3343         cons = sc_if->msk_cdata.msk_tx_cons;
 3344         prog = 0;
 3345         for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
 3346                 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
 3347                         break;
 3348                 prog++;
 3349                 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
 3350                 control = le32toh(cur_tx->msk_control);
 3351                 sc_if->msk_cdata.msk_tx_cnt--;
 3352                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 3353                 if ((control & EOP) == 0)
 3354                         continue;
 3355                 txd = &sc_if->msk_cdata.msk_txdesc[cons];
 3356                 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
 3357                     BUS_DMASYNC_POSTWRITE);
 3358                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
 3359 
 3360                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 3361                 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
 3362                     __func__));
 3363                 m_freem(txd->tx_m);
 3364                 txd->tx_m = NULL;
 3365         }
 3366 
 3367         if (prog > 0) {
 3368                 sc_if->msk_cdata.msk_tx_cons = cons;
 3369                 if (sc_if->msk_cdata.msk_tx_cnt == 0)
 3370                         sc_if->msk_watchdog_timer = 0;
 3371                 /* No need to sync LEs as we didn't update LEs. */
 3372         }
 3373 }
 3374 
 3375 static void
 3376 msk_tick(void *xsc_if)
 3377 {
 3378         struct epoch_tracker et;
 3379         struct msk_if_softc *sc_if;
 3380         struct mii_data *mii;
 3381 
 3382         sc_if = xsc_if;
 3383 
 3384         MSK_IF_LOCK_ASSERT(sc_if);
 3385 
 3386         mii = device_get_softc(sc_if->msk_miibus);
 3387 
 3388         mii_tick(mii);
 3389         if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
 3390                 msk_miibus_statchg(sc_if->msk_if_dev);
 3391         NET_EPOCH_ENTER(et);
 3392         msk_handle_events(sc_if->msk_softc);
 3393         NET_EPOCH_EXIT(et);
 3394         msk_watchdog(sc_if);
 3395         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 3396 }
 3397 
 3398 static void
 3399 msk_intr_phy(struct msk_if_softc *sc_if)
 3400 {
 3401         uint16_t status;
 3402 
 3403         msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 3404         status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 3405         /* Handle FIFO Underrun/Overflow? */
 3406         if ((status & PHY_M_IS_FIFO_ERROR))
 3407                 device_printf(sc_if->msk_if_dev,
 3408                     "PHY FIFO underrun/overflow.\n");
 3409 }
 3410 
 3411 static void
 3412 msk_intr_gmac(struct msk_if_softc *sc_if)
 3413 {
 3414         struct msk_softc *sc;
 3415         uint8_t status;
 3416 
 3417         sc = sc_if->msk_softc;
 3418         status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3419 
 3420         /* GMAC Rx FIFO overrun. */
 3421         if ((status & GM_IS_RX_FF_OR) != 0)
 3422                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 3423                     GMF_CLI_RX_FO);
 3424         /* GMAC Tx FIFO underrun. */
 3425         if ((status & GM_IS_TX_FF_UR) != 0) {
 3426                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3427                     GMF_CLI_TX_FU);
 3428                 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
 3429                 /*
 3430                  * XXX
 3431                  * In case of a Tx underrun we may need to flush/reset the
 3432                  * Tx MAC, but that would also require resynchronization
 3433                  * with the status LEs. Reinitializing the status LEs would
 3434                  * affect the other port in a dual-MAC configuration, so it
 3435                  * should be avoided as much as we can.
 3436                  * Due to lack of documentation this is all a vague guess,
 3437                  * and it needs more investigation.
 3438                  */
 3439         }
 3440 }
 3441 
 3442 static void
 3443 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
 3444 {
 3445         struct msk_softc *sc;
 3446 
 3447         sc = sc_if->msk_softc;
 3448         if ((status & Y2_IS_PAR_RD1) != 0) {
 3449                 device_printf(sc_if->msk_if_dev,
 3450                     "RAM buffer read parity error\n");
 3451                 /* Clear IRQ. */
 3452                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3453                     RI_CLR_RD_PERR);
 3454         }
 3455         if ((status & Y2_IS_PAR_WR1) != 0) {
 3456                 device_printf(sc_if->msk_if_dev,
 3457                     "RAM buffer write parity error\n");
 3458                 /* Clear IRQ. */
 3459                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3460                     RI_CLR_WR_PERR);
 3461         }
 3462         if ((status & Y2_IS_PAR_MAC1) != 0) {
 3463                 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
 3464                 /* Clear IRQ. */
 3465                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3466                     GMF_CLI_TX_PE);
 3467         }
 3468         if ((status & Y2_IS_PAR_RX1) != 0) {
 3469                 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
 3470                 /* Clear IRQ. */
 3471                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
 3472         }
 3473         if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
 3474                 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
 3475                 /* Clear IRQ. */
 3476                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
 3477         }
 3478 }
 3479 
 3480 static void
 3481 msk_intr_hwerr(struct msk_softc *sc)
 3482 {
 3483         uint32_t status;
 3484         uint32_t tlphead[4];
 3485 
 3486         status = CSR_READ_4(sc, B0_HWE_ISRC);
 3487         /* Time Stamp timer overflow. */
 3488         if ((status & Y2_IS_TIST_OV) != 0)
 3489                 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 3490         if ((status & Y2_IS_PCI_NEXP) != 0) {
 3491                 /*
 3492                  * A PCI Express error occurred which is not described in
 3493                  * the PEX spec.
 3494                  * This error is also mapped to either the Master Abort
 3495                  * (Y2_IS_MST_ERR) or the Target Abort (Y2_IS_IRQ_STAT) bit
 3496                  * and can only be cleared there.
 3497                  */
 3498                 device_printf(sc->msk_dev,
 3499                     "PCI Express protocol violation error\n");
 3500         }
 3501 
 3502         if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
 3503                 uint16_t v16;
 3504 
 3505                 if ((status & Y2_IS_MST_ERR) != 0)
 3506                         device_printf(sc->msk_dev,
 3507                             "unexpected IRQ Master error\n");
 3508                 else
 3509                         device_printf(sc->msk_dev,
 3510                             "unexpected IRQ Status error\n");
 3511                 /* Reset all bits in the PCI status register. */
 3512                 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 3513                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3514                 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
 3515                     PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 3516                     PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2);
 3517                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3518         }
 3519 
 3520         /* Check for PCI Express Uncorrectable Error. */
 3521         if ((status & Y2_IS_PCI_EXP) != 0) {
 3522                 uint32_t v32;
 3523 
 3524                 /*
 3525                  * On the PCI Express bus, bridges are called root
 3526                  * complexes (RC). PCI Express errors are recognized by the
 3527                  * root complex as well, which asks the system to handle
 3528                  * the problem. After such an error it may be that the
 3529                  * adapter can no longer be accessed at all.
 3530                  */
 3531 
 3532                 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 3533                 if ((v32 & PEX_UNSUP_REQ) != 0) {
 3534                         /* Ignore unsupported request error. */
 3535                         device_printf(sc->msk_dev,
 3536                             "Uncorrectable PCI Express error\n");
 3537                 }
 3538                 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
 3539                         int i;
 3540 
 3541                         /* Get TLP header from the Log Registers. */
 3542                         for (i = 0; i < 4; i++)
 3543                                 tlphead[i] = CSR_PCI_READ_4(sc,
 3544                                     PEX_HEADER_LOG + i * 4);
 3545                         /* Check for vendor defined broadcast message. */
 3546                         if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
 3547                                 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 3548                                 CSR_WRITE_4(sc, B0_HWE_IMSK,
 3549                                     sc->msk_intrhwemask);
 3550                                 CSR_READ_4(sc, B0_HWE_IMSK);
 3551                         }
 3552                 }
 3553                 /* Clear the interrupt. */
 3554                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3555                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 3556                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3557         }
 3558 
 3559         if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
 3560                 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
 3561         if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
 3562                 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
 3563 }
 3564 
 3565 static __inline void
 3566 msk_rxput(struct msk_if_softc *sc_if)
 3567 {
 3568         struct msk_softc *sc;
 3569 
 3570         sc = sc_if->msk_softc;
 3571         if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
 3572                 bus_dmamap_sync(
 3573                     sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 3574                     sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 3575                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3576         else
 3577                 bus_dmamap_sync(
 3578                     sc_if->msk_cdata.msk_rx_ring_tag,
 3579                     sc_if->msk_cdata.msk_rx_ring_map,
 3580                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3581         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
 3582             PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
 3583 }
 3584 
 3585 static int
 3586 msk_handle_events(struct msk_softc *sc)
 3587 {
 3588         struct msk_if_softc *sc_if;
 3589         int rxput[2];
 3590         struct msk_stat_desc *sd;
 3591         uint32_t control, status;
 3592         int cons, len, port, rxprog;
 3593 
 3594         if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX))
 3595                 return (0);
 3596 
 3597         /* Sync status LEs. */
 3598         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 3599             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 3600 
 3601         rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
 3602         rxprog = 0;
 3603         cons = sc->msk_stat_cons;
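              /*
               * Walk the status LE ring, consuming entries the hardware has
               * marked with HW_OWNER and clearing the bit as each one is
               * processed. Each LE encodes an opcode, the originating port
               * and a length/VLAN field in its control word.
               */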
 3604         for (;;) {
 3605                 sd = &sc->msk_stat_ring[cons];
 3606                 control = le32toh(sd->msk_control);
 3607                 if ((control & HW_OWNER) == 0)
 3608                         break;
 3609                 control &= ~HW_OWNER;
 3610                 sd->msk_control = htole32(control);
 3611                 status = le32toh(sd->msk_status);
 3612                 len = control & STLE_LEN_MASK;
 3613                 port = (control >> 16) & 0x01;
 3614                 sc_if = sc->msk_if[port];
 3615                 if (sc_if == NULL) {
 3616                         device_printf(sc->msk_dev, "invalid port opcode "
 3617                             "0x%08x\n", control & STLE_OP_MASK);
 3618                         continue;
 3619                 }
 3620 
 3621                 switch (control & STLE_OP_MASK) {
 3622                 case OP_RXVLAN:
 3623                         sc_if->msk_vtag = ntohs(len);
 3624                         break;
 3625                 case OP_RXCHKSVLAN:
 3626                         sc_if->msk_vtag = ntohs(len);
 3627                         /* FALLTHROUGH */
 3628                 case OP_RXCHKS:
 3629                         sc_if->msk_csum = status;
 3630                         break;
 3631                 case OP_RXSTAT:
 3632                         if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
 3633                                 break;
 3634                         if (sc_if->msk_framesize >
 3635                             (MCLBYTES - MSK_RX_BUF_ALIGN))
 3636                                 msk_jumbo_rxeof(sc_if, status, control, len);
 3637                         else
 3638                                 msk_rxeof(sc_if, status, control, len);
 3639                         rxprog++;
 3640                         /*
 3641                          * Because there is no way to sync a single Rx LE,
 3642                          * put the DMA sync operation off until the end of
 3643                          * event processing.
 3644                          */
 3645                         rxput[port]++;
 3646                         /* Update the prefetch unit if we've passed the watermark. */
 3647                         if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
 3648                                 msk_rxput(sc_if);
 3649                                 rxput[port] = 0;
 3650                         }
 3651                         break;
 3652                 case OP_TXINDEXLE:
 3653                         if (sc->msk_if[MSK_PORT_A] != NULL)
 3654                                 msk_txeof(sc->msk_if[MSK_PORT_A],
 3655                                     status & STLE_TXA1_MSKL);
 3656                         if (sc->msk_if[MSK_PORT_B] != NULL)
 3657                                 msk_txeof(sc->msk_if[MSK_PORT_B],
 3658                                     ((status & STLE_TXA2_MSKL) >>
 3659                                     STLE_TXA2_SHIFTL) |
 3660                                     ((len & STLE_TXA2_MSKH) <<
 3661                                     STLE_TXA2_SHIFTH));
 3662                         break;
 3663                 default:
 3664                         device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
 3665                             control & STLE_OP_MASK);
 3666                         break;
 3667                 }
 3668                 MSK_INC(cons, sc->msk_stat_count);
 3669                 if (rxprog > sc->msk_process_limit)
 3670                         break;
 3671         }
 3672 
 3673         sc->msk_stat_cons = cons;
 3674         bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
 3675             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 3676 
 3677         if (rxput[MSK_PORT_A] > 0)
 3678                 msk_rxput(sc->msk_if[MSK_PORT_A]);
 3679         if (rxput[MSK_PORT_B] > 0)
 3680                 msk_rxput(sc->msk_if[MSK_PORT_B]);
 3681 
 3682         return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
 3683 }
 3684 
 3685 static void
 3686 msk_intr(void *xsc)
 3687 {
 3688         struct msk_softc *sc;
 3689         struct msk_if_softc *sc_if0, *sc_if1;
 3690         struct ifnet *ifp0, *ifp1;
 3691         uint32_t status;
 3692         int domore;
 3693 
 3694         sc = xsc;
 3695         MSK_LOCK(sc);
 3696 
 3697         /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
 3698         status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
 3699         if (status == 0 || status == 0xffffffff ||
 3700             (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
 3701             (status & sc->msk_intrmask) == 0) {
 3702                 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3703                 MSK_UNLOCK(sc);
 3704                 return;
 3705         }
 3706 
 3707         sc_if0 = sc->msk_if[MSK_PORT_A];
 3708         sc_if1 = sc->msk_if[MSK_PORT_B];
 3709         ifp0 = ifp1 = NULL;
 3710         if (sc_if0 != NULL)
 3711                 ifp0 = sc_if0->msk_ifp;
 3712         if (sc_if1 != NULL)
 3713                 ifp1 = sc_if1->msk_ifp;
 3714 
 3715         if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
 3716                 msk_intr_phy(sc_if0);
 3717         if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
 3718                 msk_intr_phy(sc_if1);
 3719         if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
 3720                 msk_intr_gmac(sc_if0);
 3721         if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
 3722                 msk_intr_gmac(sc_if1);
 3723         if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
 3724                 device_printf(sc->msk_dev, "Rx descriptor error\n");
 3725                 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
 3726                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3727                 CSR_READ_4(sc, B0_IMSK);
 3728         }
 3729         if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
 3730                 device_printf(sc->msk_dev, "Tx descriptor error\n");
 3731                 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
 3732                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3733                 CSR_READ_4(sc, B0_IMSK);
 3734         }
 3735         if ((status & Y2_IS_HW_ERR) != 0)
 3736                 msk_intr_hwerr(sc);
 3737 
 3738         domore = msk_handle_events(sc);
 3739         if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0)
 3740                 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
 3741 
 3742         /* Reenable interrupts. */
 3743         CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3744 
 3745         if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3746             !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
 3747                 msk_start_locked(ifp0);
 3748         if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 3749             !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
 3750                 msk_start_locked(ifp1);
 3751 
 3752         MSK_UNLOCK(sc);
 3753 }
 3754 
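      /*
       * Configure Tx store-and-forward.  MACs that can store-and-forward
       * jumbo frames keep it enabled; on the others a jumbo MTU requires
       * disabling it and programming the Tx GMAC FIFO almost-empty threshold.
       */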
 3755 static void
 3756 msk_set_tx_stfwd(struct msk_if_softc *sc_if)
 3757 {
 3758         struct msk_softc *sc;
 3759         struct ifnet *ifp;
 3760 
 3761         ifp = sc_if->msk_ifp;
 3762         sc = sc_if->msk_softc;
 3763         if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
 3764             sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
 3765             sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
 3766                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3767                     TX_STFW_ENA);
 3768         } else {
 3769                 if (ifp->if_mtu > ETHERMTU) {
 3770                         /* Set Tx GMAC FIFO Almost Empty Threshold. */
 3771                         CSR_WRITE_4(sc,
 3772                             MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
 3773                             MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
 3774                         /* Disable Store & Forward mode for Tx. */
 3775                         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3776                             TX_STFW_DIS);
 3777                 } else {
 3778                         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3779                             TX_STFW_ENA);
 3780                 }
 3781         }
 3782 }
 3783 
 3784 static void
 3785 msk_init(void *xsc)
 3786 {
 3787         struct msk_if_softc *sc_if = xsc;
 3788 
 3789         MSK_IF_LOCK(sc_if);
 3790         msk_init_locked(sc_if);
 3791         MSK_IF_UNLOCK(sc_if);
 3792 }
 3793 
 3794 static void
 3795 msk_init_locked(struct msk_if_softc *sc_if)
 3796 {
 3797         struct msk_softc *sc;
 3798         struct ifnet *ifp;
 3799         struct mii_data  *mii;
 3800         uint8_t *eaddr;
 3801         uint16_t gmac;
 3802         uint32_t reg;
 3803         int error;
 3804 
 3805         MSK_IF_LOCK_ASSERT(sc_if);
 3806 
 3807         ifp = sc_if->msk_ifp;
 3808         sc = sc_if->msk_softc;
 3809         mii = device_get_softc(sc_if->msk_miibus);
 3810 
 3811         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 3812                 return;
 3813 
 3814         error = 0;
 3815         /* Cancel pending I/O and free all Rx/Tx buffers. */
 3816         msk_stop(sc_if);
 3817 
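              /* Frame size is max(MTU, ETHERMTU) plus Ethernet header and VLAN tag. */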
 3818         if (ifp->if_mtu < ETHERMTU)
 3819                 sc_if->msk_framesize = ETHERMTU;
 3820         else
 3821                 sc_if->msk_framesize = ifp->if_mtu;
 3822         sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
 3823         if (ifp->if_mtu > ETHERMTU &&
 3824             (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
 3825                 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
 3826                 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
 3827         }
 3828 
 3829         /* GMAC Control reset. */
 3830         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
 3831         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
 3832         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
 3833         if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 3834             sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
 3835                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
 3836                     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 3837                     GMC_BYP_RETR_ON);
 3838 
 3839         /*
 3840          * Initialize the GMAC first so that speed, duplex, and flow-control
 3841          * parameters are renegotiated when the interface is brought up.
 3842          */
 3843         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
 3844 
 3845         /* Dummy read the Interrupt Source Register. */
 3846         CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3847 
 3848         /* Clear MIB stats. */
 3849         msk_stats_clear(sc_if);
 3850 
 3851         /* Disable FCS. */
 3852         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
 3853 
 3854         /* Setup Transmit Control Register. */
 3855         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
 3856 
 3857         /* Setup Transmit Flow Control Register. */
 3858         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
 3859 
 3860         /* Setup Transmit Parameter Register. */
 3861         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
 3862             TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
 3863             TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
 3864 
 3865         gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
 3866             GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
 3867 
 3868         if (ifp->if_mtu > ETHERMTU)
 3869                 gmac |= GM_SMOD_JUMBO_ENA;
 3870         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
 3871 
 3872         /* Set station address. */
 3873         eaddr = IF_LLADDR(ifp);
 3874         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
 3875             eaddr[0] | (eaddr[1] << 8));
 3876         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
 3877             eaddr[2] | (eaddr[3] << 8));
 3878         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
 3879             eaddr[4] | (eaddr[5] << 8));
 3880         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
 3881             eaddr[0] | (eaddr[1] << 8));
 3882         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
 3883             eaddr[2] | (eaddr[3] << 8));
 3884         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
 3885             eaddr[4] | (eaddr[5] << 8));
 3886 
 3887         /* Disable interrupts for counter overflows. */
 3888         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
 3889         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
 3890         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
 3891 
 3892         /* Configure Rx MAC FIFO. */
 3893         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 3894         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
 3895         reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
 3896         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
 3897             sc->msk_hw_id == CHIP_ID_YUKON_EX)
 3898                 reg |= GMF_RX_OVER_ON;
 3899         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
 3900 
 3901         /* Set receive filter. */
 3902         msk_rxfilter(sc_if);
 3903 
 3904         if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
 3905                 /* Clear flush mask - HW bug. */
 3906                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
 3907         } else {
 3908                 /* Flush Rx MAC FIFO on any flow control or error. */
 3909                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
 3910                     GMR_FS_ANY_ERR);
 3911         }
 3912 
 3913         /*
 3914          * Set the Rx FIFO flush threshold to 64 bytes plus 1 FIFO word
 3915          * to avoid a hardware hang on receipt of pause frames.
 3916          */
 3917         reg = RX_GMF_FL_THR_DEF + 1;
 3918         /* Another magic value for Yukon FE+, taken from Linux. */
 3919         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3920             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
 3921                 reg = 0x178;
 3922         CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
 3923 
 3924         /* Configure Tx MAC FIFO. */
 3925         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 3926         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
 3927         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
 3928 
 3929         /* Configure hardware VLAN tag insertion/stripping. */
 3930         msk_setvlan(sc_if, ifp);
 3931 
 3932         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
 3933                 /* Set Rx Pause threshold. */
 3934                 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
 3935                     MSK_ECU_LLPP);
 3936                 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
 3937                     MSK_ECU_ULPP);
 3938                 /* Configure store-and-forward for Tx. */
 3939                 msk_set_tx_stfwd(sc_if);
 3940         }
 3941 
 3942         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3943             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
 3944                 /* Disable dynamic watermark - from Linux. */
 3945                 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
 3946                 reg &= ~0x03;
 3947                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
 3948         }
 3949 
 3950         /*
 3951          * Disable the Force Sync bit and the Alloc bit in the Tx RAM
 3952          * interface arbiter since we don't use the Sync Tx queue.
 3953          */
 3954         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
 3955             TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
 3956         /* Enable the RAM Interface Arbiter. */
 3957         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
 3958 
 3959         /* Setup RAM buffer. */
 3960         msk_set_rambuffer(sc_if);
 3961 
 3962         /* Disable Tx sync Queue. */
 3963         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
 3964 
 3965         /* Setup Tx Queue Bus Memory Interface. */
 3966         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
 3967         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
 3968         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
 3969         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
 3970         switch (sc->msk_hw_id) {
 3971         case CHIP_ID_YUKON_EC_U:
 3972                 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
 3973                         /* Fix for Yukon-EC Ultra: set BMU FIFO level */
 3974                         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
 3975                             MSK_ECU_TXFF_LEV);
 3976                 }
 3977                 break;
 3978         case CHIP_ID_YUKON_EX:
 3979                 /*
 3980                  * Yukon Extreme seems to have a silicon bug in its
 3981                  * automatic Tx checksum calculation capability.
 3982                  */
 3983                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
 3984                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
 3985                             F_TX_CHK_AUTO_OFF);
 3986                 break;
 3987         }
 3988 
 3989         /* Setup Rx Queue Bus Memory Interface. */
 3990         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
 3991         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
 3992         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
 3993         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
 3994         if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
 3995             sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
 3996                 /* MAC Rx RAM Read is controlled by hardware. */
 3997                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
 3998         }
 3999 
 4000         msk_set_prefetch(sc, sc_if->msk_txq,
 4001             sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
 4002         msk_init_tx_ring(sc_if);
 4003 
 4004         /* Disable RSS hashing and configure Rx checksum offload. */
 4005         reg = BMU_DIS_RX_RSS_HASH;
 4006         if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
 4007             (ifp->if_capenable & IFCAP_RXCSUM) != 0)
 4008                 reg |= BMU_ENA_RX_CHKSUM;
 4009         else
 4010                 reg |= BMU_DIS_RX_CHKSUM;
 4011         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);
 4012         if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
 4013                 msk_set_prefetch(sc, sc_if->msk_rxq,
 4014                     sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
 4015                     MSK_JUMBO_RX_RING_CNT - 1);
 4016                 error = msk_init_jumbo_rx_ring(sc_if);
 4017         } else {
 4018                 msk_set_prefetch(sc, sc_if->msk_rxq,
 4019                     sc_if->msk_rdata.msk_rx_ring_paddr,
 4020                     MSK_RX_RING_CNT - 1);
 4021                 error = msk_init_rx_ring(sc_if);
 4022         }
 4023         if (error != 0) {
 4024                 device_printf(sc_if->msk_if_dev,
 4025                     "initialization failed: no memory for Rx buffers\n");
 4026                 msk_stop(sc_if);
 4027                 return;
 4028         }
 4029         if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 4030             sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 4031                 /* Disable flushing of non-ASF packets. */
 4032                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 4033                     GMF_RX_MACSEC_FLUSH_OFF);
 4034         }
 4035 
 4036         /* Configure interrupt handling. */
 4037         if (sc_if->msk_port == MSK_PORT_A) {
 4038                 sc->msk_intrmask |= Y2_IS_PORT_A;
 4039                 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
 4040         } else {
 4041                 sc->msk_intrmask |= Y2_IS_PORT_B;
 4042                 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
 4043         }
 4044         /* Configure IRQ moderation mask. */
 4045         CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
 4046         if (sc->msk_int_holdoff > 0) {
 4047                 /* Configure initial IRQ moderation timer value. */
 4048                 CSR_WRITE_4(sc, B2_IRQM_INI,
 4049                     MSK_USECS(sc, sc->msk_int_holdoff));
 4050                 CSR_WRITE_4(sc, B2_IRQM_VAL,
 4051                     MSK_USECS(sc, sc->msk_int_holdoff));
 4052                 /* Start IRQ moderation. */
 4053                 CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START);
 4054         }
 4055         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 4056         CSR_READ_4(sc, B0_HWE_IMSK);
 4057         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 4058         CSR_READ_4(sc, B0_IMSK);
 4059 
 4060         ifp->if_drv_flags |= IFF_DRV_RUNNING;
 4061         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 4062 
 4063         sc_if->msk_flags &= ~MSK_FLAG_LINK;
 4064         mii_mediachg(mii);
 4065 
 4066         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 4067 }
 4068 
 4069 static void
 4070 msk_set_rambuffer(struct msk_if_softc *sc_if)
 4071 {
 4072         struct msk_softc *sc;
 4073         int ltpp, utpp;
 4074 
 4075         sc = sc_if->msk_softc;
 4076         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
 4077                 return;
 4078 
 4079         /* Setup Rx Queue. */
 4080         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
 4081         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
 4082             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4083         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
 4084             sc->msk_rxqend[sc_if->msk_port] / 8);
 4085         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
 4086             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4087         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
 4088             sc->msk_rxqstart[sc_if->msk_port] / 8);
 4089 
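              /*
               * Compute the upper and lower Rx pause thresholds relative to
               * the end of this port's RAM region, in 8-byte RAM buffer units.
               */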
 4090         utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 4091             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
 4092         ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 4093             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
 4094         if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
 4095                 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
 4096         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
 4097         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
 4098         /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
 4099 
 4100         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
 4101         CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
 4102 
 4103         /* Setup Tx Queue. */
 4104         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
 4105         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
 4106             sc->msk_txqstart[sc_if->msk_port] / 8);
 4107         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
 4108             sc->msk_txqend[sc_if->msk_port] / 8);
 4109         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
 4110             sc->msk_txqstart[sc_if->msk_port] / 8);
 4111         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
 4112             sc->msk_txqstart[sc_if->msk_port] / 8);
 4113         /* Enable Store & Forward for Tx side. */
 4114         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
 4115         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
 4116         CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
 4117 }
 4118 
 4119 static void
 4120 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
 4121     uint32_t count)
 4122 {
 4123 
 4124         /* Reset the prefetch unit. */
 4125         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4126             PREF_UNIT_RST_SET);
 4127         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4128             PREF_UNIT_RST_CLR);
 4129         /* Set LE base address. */
 4130         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
 4131             MSK_ADDR_LO(addr));
 4132         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
 4133             MSK_ADDR_HI(addr));
 4134         /* Set the list last index. */
 4135         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
 4136             count);
 4137         /* Turn on prefetch unit. */
 4138         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 4139             PREF_UNIT_OP_ON);
 4140         /* Dummy read to ensure write. */
 4141         CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
 4142 }
 4143 
 4144 static void
 4145 msk_stop(struct msk_if_softc *sc_if)
 4146 {
 4147         struct msk_softc *sc;
 4148         struct msk_txdesc *txd;
 4149         struct msk_rxdesc *rxd;
 4150         struct msk_rxdesc *jrxd;
 4151         struct ifnet *ifp;
 4152         uint32_t val;
 4153         int i;
 4154 
 4155         MSK_IF_LOCK_ASSERT(sc_if);
 4156         sc = sc_if->msk_softc;
 4157         ifp = sc_if->msk_ifp;
 4158 
 4159         callout_stop(&sc_if->msk_tick_ch);
 4160         sc_if->msk_watchdog_timer = 0;
 4161 
 4162         /* Disable interrupts. */
 4163         if (sc_if->msk_port == MSK_PORT_A) {
 4164                 sc->msk_intrmask &= ~Y2_IS_PORT_A;
 4165                 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
 4166         } else {
 4167                 sc->msk_intrmask &= ~Y2_IS_PORT_B;
 4168                 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
 4169         }
 4170         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 4171         CSR_READ_4(sc, B0_HWE_IMSK);
 4172         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 4173         CSR_READ_4(sc, B0_IMSK);
 4174 
 4175         /* Disable Tx/Rx MAC. */
 4176         val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 4177         val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
 4178         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
 4179         /* Read back to ensure the write completes. */
 4180         GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 4181         /* Update stats and clear counters. */
 4182         msk_stats_update(sc_if);
 4183 
 4184         /* Stop Tx BMU. */
 4185         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
 4186         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
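              /* Re-issue the stop command until the BMU reports stopped or idle. */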
 4187         for (i = 0; i < MSK_TIMEOUT; i++) {
 4188                 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
 4189                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 4190                             BMU_STOP);
 4191                         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 4192                 } else
 4193                         break;
 4194                 DELAY(1);
 4195         }
 4196         if (i == MSK_TIMEOUT)
 4197                 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
 4198         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
 4199             RB_RST_SET | RB_DIS_OP_MD);
 4200 
 4201         /* Disable all GMAC interrupts. */
 4202         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
 4203         /* Disable PHY interrupt. */
 4204         msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
 4205 
 4206         /* Disable the RAM Interface Arbiter. */
 4207         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
 4208 
 4209         /* Reset the PCI FIFO of the async Tx queue. */
 4210         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 4211             BMU_RST_SET | BMU_FIFO_RST);
 4212 
 4213         /* Reset the Tx prefetch unit. */
 4214         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
 4215             PREF_UNIT_RST_SET);
 4216 
 4217         /* Reset the RAM Buffer async Tx queue. */
 4218         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
 4219 
 4220         /* Reset Tx MAC FIFO. */
 4221         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 4222         /* Set Pause Off. */
 4223         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
 4224 
 4225         /*
 4226          * The Rx Stop command does not work on Yukon-2 unless the BMU has
 4227          * reached the end of a packet, and since we cannot be sure whether
 4228          * any data is incoming, we must reset the BMU while no DMA transfer
 4229          * is in progress. Because the Rx path may still be active, the Rx
 4230          * RAM buffer is stopped first so that any incoming data cannot
 4231          * trigger a DMA. After the RAM buffer has been stopped, the BMU is
 4232          * polled until any DMA in progress has ended, and only then is it
 4233          * reset.
 4234          */
 4235 
 4236         /* Disable the RAM Buffer receive queue. */
 4237         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
 4238         for (i = 0; i < MSK_TIMEOUT; i++) {
 4239                 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
 4240                     CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
 4241                         break;
 4242                 DELAY(1);
 4243         }
 4244         if (i == MSK_TIMEOUT)
 4245                 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
 4246         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
 4247             BMU_RST_SET | BMU_FIFO_RST);
 4248         /* Reset the Rx prefetch unit. */
 4249         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
 4250             PREF_UNIT_RST_SET);
 4251         /* Reset the RAM Buffer receive queue. */
 4252         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
 4253         /* Reset Rx MAC FIFO. */
 4254         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 4255 
 4256         /* Free Rx and Tx mbufs still in the queues. */
 4257         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 4258                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 4259                 if (rxd->rx_m != NULL) {
 4260                         bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
 4261                             rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 4262                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
 4263                             rxd->rx_dmamap);
 4264                         m_freem(rxd->rx_m);
 4265                         rxd->rx_m = NULL;
 4266                 }
 4267         }
 4268         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 4269                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 4270                 if (jrxd->rx_m != NULL) {
 4271                         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
 4272                             jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 4273                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
 4274                             jrxd->rx_dmamap);
 4275                         m_freem(jrxd->rx_m);
 4276                         jrxd->rx_m = NULL;
 4277                 }
 4278         }
 4279         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 4280                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 4281                 if (txd->tx_m != NULL) {
 4282                         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
 4283                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
 4284                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
 4285                             txd->tx_dmamap);
 4286                         m_freem(txd->tx_m);
 4287                         txd->tx_m = NULL;
 4288                 }
 4289         }
 4290 
 4291         /*
 4292          * Mark the interface down.
 4293          */
 4294         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 4295         sc_if->msk_flags &= ~MSK_FLAG_LINK;
 4296 }
 4297 
 4298 /*
 4299  * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the low
 4300  * 16 bits of a counter clears its high 16 bits, so the low 16 bits
 4301  * must be read last.
 4302  */
 4303 #define MSK_READ_MIB32(x, y)                                    \
 4304         ((((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +      \
 4305         (uint32_t)GMAC_READ_2(sc, x, y))
 4306 #define MSK_READ_MIB64(x, y)                                    \
 4307         ((((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +       \
 4308         (uint64_t)MSK_READ_MIB32(x, y))
 4309 
 4310 static void
 4311 msk_stats_clear(struct msk_if_softc *sc_if)
 4312 {
 4313         struct msk_softc *sc;
 4314         uint16_t gmac;
 4315         int i;
 4316 
 4317         MSK_IF_LOCK_ASSERT(sc_if);
 4318 
 4319         sc = sc_if->msk_softc;
 4320         /* Set MIB Clear Counter Mode. */
 4321         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 4322         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 4323         /* Read all MIB Counters with Clear Mode set. */
 4324         for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
 4325                 (void)MSK_READ_MIB32(sc_if->msk_port, i);
 4326         /* Clear MIB Clear Counter Mode. */
 4327         gmac &= ~GM_PAR_MIB_CLR;
 4328         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 4329 }
 4330 
 4331 static void
 4332 msk_stats_update(struct msk_if_softc *sc_if)
 4333 {
 4334         struct msk_softc *sc;
 4335         struct ifnet *ifp;
 4336         struct msk_hw_stats *stats;
 4337         uint16_t gmac;
 4338 
 4339         MSK_IF_LOCK_ASSERT(sc_if);
 4340 
 4341         ifp = sc_if->msk_ifp;
 4342         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 4343                 return;
 4344         sc = sc_if->msk_softc;
 4345         stats = &sc_if->msk_stats;
 4346         /* Set MIB Clear Counter Mode. */
 4347         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 4348         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 4349 
 4350         /* Rx stats. */
 4351         stats->rx_ucast_frames +=
 4352             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
 4353         stats->rx_bcast_frames +=
 4354             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
 4355         stats->rx_pause_frames +=
 4356             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
 4357         stats->rx_mcast_frames +=
 4358             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
 4359         stats->rx_crc_errs +=
 4360             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
 4361         stats->rx_good_octets +=
 4362             MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
 4363         stats->rx_bad_octets +=
 4364             MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
 4365         stats->rx_runts +=
 4366             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
 4367         stats->rx_runt_errs +=
 4368             MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
 4369         stats->rx_pkts_64 +=
 4370             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
 4371         stats->rx_pkts_65_127 +=
 4372             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
 4373         stats->rx_pkts_128_255 +=
 4374             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
 4375         stats->rx_pkts_256_511 +=
 4376             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
 4377         stats->rx_pkts_512_1023 +=
 4378             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
 4379         stats->rx_pkts_1024_1518 +=
 4380             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
 4381         stats->rx_pkts_1519_max +=
 4382             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
 4383         stats->rx_pkts_too_long +=
 4384             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
 4385         stats->rx_pkts_jabbers +=
 4386             MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
 4387         stats->rx_fifo_oflows +=
 4388             MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
 4389 
 4390         /* Tx stats. */
 4391         stats->tx_ucast_frames +=
 4392             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
 4393         stats->tx_bcast_frames +=
 4394             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
 4395         stats->tx_pause_frames +=
 4396             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
 4397         stats->tx_mcast_frames +=
 4398             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
 4399         stats->tx_octets +=
 4400             MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
 4401         stats->tx_pkts_64 +=
 4402             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
 4403         stats->tx_pkts_65_127 +=
 4404             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
 4405         stats->tx_pkts_128_255 +=
 4406             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
 4407         stats->tx_pkts_256_511 +=
 4408             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
 4409         stats->tx_pkts_512_1023 +=
 4410             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
 4411         stats->tx_pkts_1024_1518 +=
 4412             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
 4413         stats->tx_pkts_1519_max +=
 4414             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
 4415         stats->tx_colls +=
 4416             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
 4417         stats->tx_late_colls +=
 4418             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
 4419         stats->tx_excess_colls +=
 4420             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
 4421         stats->tx_multi_colls +=
 4422             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
 4423         stats->tx_single_colls +=
 4424             MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
 4425         stats->tx_underflows +=
 4426             MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
 4427         /* Clear MIB Clear Counter Mode. */
 4428         gmac &= ~GM_PAR_MIB_CLR;
 4429         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 4430 }
 4431 
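      /*
       * Sysctl handlers for the MAC statistics: report the value accumulated
       * in software plus the current hardware MIB counter.
       */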
 4432 static int
 4433 msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
 4434 {
 4435         struct msk_softc *sc;
 4436         struct msk_if_softc *sc_if;
 4437         uint32_t result, *stat;
 4438         int off;
 4439 
 4440         sc_if = (struct msk_if_softc *)arg1;
 4441         sc = sc_if->msk_softc;
 4442         off = arg2;
 4443         stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);
 4444 
 4445         MSK_IF_LOCK(sc_if);
 4446         result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
 4447         result += *stat;
 4448         MSK_IF_UNLOCK(sc_if);
 4449 
 4450         return (sysctl_handle_int(oidp, &result, 0, req));
 4451 }
 4452 
 4453 static int
 4454 msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
 4455 {
 4456         struct msk_softc *sc;
 4457         struct msk_if_softc *sc_if;
 4458         uint64_t result, *stat;
 4459         int off;
 4460 
 4461         sc_if = (struct msk_if_softc *)arg1;
 4462         sc = sc_if->msk_softc;
 4463         off = arg2;
 4464         stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);
 4465 
 4466         MSK_IF_LOCK(sc_if);
 4467         result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
 4468         result += *stat;
 4469         MSK_IF_UNLOCK(sc_if);
 4470 
 4471         return (sysctl_handle_64(oidp, &result, 0, req));
 4472 }
 4473 
 4474 #undef MSK_READ_MIB32
 4475 #undef MSK_READ_MIB64
 4476 
 4477 #define MSK_SYSCTL_STAT32(sc, c, o, p, n, d)                            \
 4478         SYSCTL_ADD_PROC(c, p, OID_AUTO, o,                              \
 4479             CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,              \
 4480             sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,    \
 4481             "IU", d)
 4482 #define MSK_SYSCTL_STAT64(sc, c, o, p, n, d)                            \
 4483         SYSCTL_ADD_PROC(c, p, OID_AUTO, o,                              \
 4484             CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,               \
 4485             sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,    \
 4486             "QU", d)
 4487 
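      /*
       * Build the per-port statistics sysctl tree; counters appear under
       * dev.msk.<unit>.stats.rx and dev.msk.<unit>.stats.tx.
       */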
 4488 static void
 4489 msk_sysctl_node(struct msk_if_softc *sc_if)
 4490 {
 4491         struct sysctl_ctx_list *ctx;
 4492         struct sysctl_oid_list *child, *schild;
 4493         struct sysctl_oid *tree;
 4494 
 4495         ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
 4496         child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));
 4497 
 4498         tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
 4499             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MSK Statistics");
 4500         schild = SYSCTL_CHILDREN(tree);
 4501         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx",
 4502             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MSK RX Statistics");
 4503         child = SYSCTL_CHILDREN(tree);
 4504         MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
 4505             child, rx_ucast_frames, "Good unicast frames");
 4506         MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
 4507             child, rx_bcast_frames, "Good broadcast frames");
 4508         MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
 4509             child, rx_pause_frames, "Pause frames");
 4510         MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
 4511             child, rx_mcast_frames, "Multicast frames");
 4512         MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
 4513             child, rx_crc_errs, "CRC errors");
 4514         MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
 4515             child, rx_good_octets, "Good octets");
 4516         MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
 4517             child, rx_bad_octets, "Bad octets");
 4518         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
 4519             child, rx_pkts_64, "64 bytes frames");
 4520         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
 4521             child, rx_pkts_65_127, "65 to 127 bytes frames");
 4522         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
 4523             child, rx_pkts_128_255, "128 to 255 bytes frames");
 4524         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
 4525             child, rx_pkts_256_511, "256 to 511 bytes frames");
 4526         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
 4527             child, rx_pkts_512_1023, "512 to 1023 bytes frames");
 4528         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
 4529             child, rx_pkts_1024_1518, "1024 to 1518 bytes frames");
 4530         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
 4531             child, rx_pkts_1519_max, "1519 to max frames");
 4532         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
 4533             child, rx_pkts_too_long, "frames too long");
 4534         MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
 4535             child, rx_pkts_jabbers, "Jabber errors");
 4536         MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
 4537             child, rx_fifo_oflows, "FIFO overflows");
 4538 
 4539         tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx",
 4540             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MSK TX Statistics");
 4541         child = SYSCTL_CHILDREN(tree);
 4542         MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
 4543             child, tx_ucast_frames, "Unicast frames");
 4544         MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
 4545             child, tx_bcast_frames, "Broadcast frames");
 4546         MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
 4547             child, tx_pause_frames, "Pause frames");
 4548         MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
 4549             child, tx_mcast_frames, "Multicast frames");
 4550         MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
 4551             child, tx_octets, "Octets");
 4552         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
 4553             child, tx_pkts_64, "64 bytes frames");
 4554         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
 4555             child, tx_pkts_65_127, "65 to 127 bytes frames");
 4556         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
 4557             child, tx_pkts_128_255, "128 to 255 bytes frames");
 4558         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
 4559             child, tx_pkts_256_511, "256 to 511 bytes frames");
 4560         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
 4561             child, tx_pkts_512_1023, "512 to 1023 bytes frames");
 4562         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
 4563             child, tx_pkts_1024_1518, "1024 to 1518 bytes frames");
 4564         MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
 4565             child, tx_pkts_1519_max, "1519 to max frames");
 4566         MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
 4567             child, tx_colls, "Collisions");
 4568         MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
 4569             child, tx_late_colls, "Late collisions");
 4570         MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
 4571             child, tx_excess_colls, "Excessive collisions");
 4572         MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
 4573             child, tx_multi_colls, "Multiple collisions");
 4574         MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
 4575             child, tx_single_colls, "Single collisions");
 4576         MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
 4577             child, tx_underflows, "FIFO underflows");
 4578 }
 4579 
 4580 #undef MSK_SYSCTL_STAT32
 4581 #undef MSK_SYSCTL_STAT64
 4582 
 4583 static int
 4584 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
 4585 {
 4586         int error, value;
 4587 
 4588         if (!arg1)
 4589                 return (EINVAL);
 4590         value = *(int *)arg1;
 4591         error = sysctl_handle_int(oidp, &value, 0, req);
 4592         if (error || !req->newptr)
 4593                 return (error);
 4594         if (value < low || value > high)
 4595                 return (EINVAL);
 4596         *(int *)arg1 = value;
 4597 
 4598         return (0);
 4599 }
 4600 
 4601 static int
 4602 sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
 4603 {
 4604 
 4605         return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
 4606             MSK_PROC_MAX));
 4607 }
