FreeBSD/Linux Kernel Cross Reference
sys/dev/netif/msk/if_msk.c


/******************************************************************************
 *
 * Name   : sky2.c
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 *      LICENSE:
 *      Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 *      The computer program files contained in this folder ("Files")
 *      are provided to you under the BSD-type license terms provided
 *      below, and any use of such Files and any derivative works
 *      thereof created by you shall be governed by the following terms
 *      and conditions:
 *
 *      - Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials provided
 *        with the distribution.
 *      - Neither the name of Marvell nor the names of its contributors
 *        may be used to endorse or promote products derived from this
 *        software without specific prior written permission.
 *
 *      THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *      "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *      LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *      FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *      COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *      INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *      BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
 *      LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *      HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 *      STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *      ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 *      OF THE POSSIBILITY OF SUCH DAMAGE.
 *      /LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/in_cksum.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_mskreg.h"

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define MSK_CSUM_FEATURES       (CSUM_TCP | CSUM_UDP)
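
/*
 * Note: MSK_CSUM_FEATURES is the set of if_hwassist flags advertised
 * when IFCAP_TXCSUM is enabled (see msk_ioctl() below); the controller
 * then computes TCP and UDP checksums on transmit.
 */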

/*
 * Devices supported by this driver.
 */
static const struct msk_product {
        uint16_t        msk_vendorid;
        uint16_t        msk_deviceid;
        const char      *msk_name;
} msk_products[] = {
        { VENDORID_SK, DEVICEID_SK_YUKON2,
            "SK-9Sxx Gigabit Ethernet" },
        { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
            "SK-9Exx Gigabit Ethernet"},
        { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
            "Marvell Yukon 88E8021CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
            "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
            "Marvell Yukon 88E8022CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
            "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
            "Marvell Yukon 88E8061CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
            "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
            "Marvell Yukon 88E8062CU Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
            "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8035,
            "Marvell Yukon 88E8035 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8036,
            "Marvell Yukon 88E8036 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8038,
            "Marvell Yukon 88E8038 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8039,
            "Marvell Yukon 88E8039 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8040,
            "Marvell Yukon 88E8040 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8040T,
            "Marvell Yukon 88E8040T Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8042,
            "Marvell Yukon 88E8042 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_8048,
            "Marvell Yukon 88E8048 Fast Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4361,
            "Marvell Yukon 88E8050 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4360,
            "Marvell Yukon 88E8052 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4362,
            "Marvell Yukon 88E8053 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4363,
            "Marvell Yukon 88E8055 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4364,
            "Marvell Yukon 88E8056 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4365,
            "Marvell Yukon 88E8070 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436A,
            "Marvell Yukon 88E8058 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436B,
            "Marvell Yukon 88E8071 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436C,
            "Marvell Yukon 88E8072 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_436D,
            "Marvell Yukon 88E8055 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4370,
            "Marvell Yukon 88E8075 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4380,
            "Marvell Yukon 88E8057 Gigabit Ethernet" },
        { VENDORID_MARVELL, DEVICEID_MRVL_4381,
            "Marvell Yukon 88E8059 Gigabit Ethernet" },
        { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
            "D-Link 550SX Gigabit Ethernet" },
        { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
            "D-Link 560T Gigabit Ethernet" },
        { 0, 0, NULL }
};

static const char *model_name[] = {
        "Yukon XL",
        "Yukon EC Ultra",
        "Yukon EX",
        "Yukon EC",
        "Yukon FE",
        "Yukon FE+",
        "Yukon Supreme",
        "Yukon Ultra 2",
        "Yukon Unknown",
        "Yukon Optima"
};

static int      mskc_probe(device_t);
static int      mskc_attach(device_t);
static int      mskc_detach(device_t);
static int      mskc_shutdown(device_t);
static int      mskc_suspend(device_t);
static int      mskc_resume(device_t);
static void     mskc_intr(void *);

static void     mskc_reset(struct msk_softc *);
static void     mskc_set_imtimer(struct msk_softc *);
static void     mskc_intr_hwerr(struct msk_softc *);
static int      mskc_handle_events(struct msk_softc *);
static void     mskc_phy_power(struct msk_softc *, int);
static int      mskc_setup_rambuffer(struct msk_softc *);
static int      mskc_status_dma_alloc(struct msk_softc *);
static void     mskc_status_dma_free(struct msk_softc *);
static int      mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
static int      mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);

static int      msk_probe(device_t);
static int      msk_attach(device_t);
static int      msk_detach(device_t);
static int      msk_miibus_readreg(device_t, int, int);
static int      msk_miibus_writereg(device_t, int, int, int);
static void     msk_miibus_statchg(device_t);

static void     msk_init(void *);
static int      msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     msk_start(struct ifnet *, struct ifaltq_subque *);
static void     msk_watchdog(struct ifnet *);
static int      msk_mediachange(struct ifnet *);
static void     msk_mediastatus(struct ifnet *, struct ifmediareq *);

static void     msk_tick(void *);
static void     msk_intr_phy(struct msk_if_softc *);
static void     msk_intr_gmac(struct msk_if_softc *);
static __inline void
                msk_rxput(struct msk_if_softc *);
static void     msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void     msk_rxeof(struct msk_if_softc *, uint32_t, int);
static void     msk_txeof(struct msk_if_softc *, int);
static void     msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void     msk_set_rambuffer(struct msk_if_softc *);
static void     msk_stop(struct msk_if_softc *);

static int      msk_txrx_dma_alloc(struct msk_if_softc *);
static void     msk_txrx_dma_free(struct msk_if_softc *);
static int      msk_init_rx_ring(struct msk_if_softc *);
static void     msk_init_tx_ring(struct msk_if_softc *);
static __inline void
                msk_discard_rxbuf(struct msk_if_softc *, int);
static int      msk_newbuf(struct msk_if_softc *, int, int);
static int      msk_encap(struct msk_if_softc *, struct mbuf **);

#ifdef MSK_JUMBO
static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int msk_jumbo_newbuf(struct msk_if_softc *, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void *msk_jalloc(struct msk_if_softc *);
static void msk_jfree(void *, void *);
#endif

static int      msk_phy_readreg(struct msk_if_softc *, int, int);
static int      msk_phy_writereg(struct msk_if_softc *, int, int, int);

static void     msk_rxfilter(struct msk_if_softc *);
static void     msk_setvlan(struct msk_if_softc *, struct ifnet *);
static void     msk_set_tx_stfwd(struct msk_if_softc *);

static int      msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
                                  void **, bus_addr_t *, bus_dmamap_t *);
static void     msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);

static device_method_t mskc_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         mskc_probe),
        DEVMETHOD(device_attach,        mskc_attach),
        DEVMETHOD(device_detach,        mskc_detach),
        DEVMETHOD(device_suspend,       mskc_suspend),
        DEVMETHOD(device_resume,        mskc_resume),
        DEVMETHOD(device_shutdown,      mskc_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        { NULL, NULL }
};

static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         msk_probe),
        DEVMETHOD(device_attach,        msk_attach),
        DEVMETHOD(device_detach,        msk_detach),
        DEVMETHOD(device_shutdown,      bus_generic_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       msk_miibus_readreg),
        DEVMETHOD(miibus_writereg,      msk_miibus_writereg),
        DEVMETHOD(miibus_statchg,       msk_miibus_statchg),

        { NULL, NULL }
};

static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
static devclass_t msk_devclass;

DECLARE_DUMMY_MODULE(if_msk);
DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, NULL, NULL);
DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);
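
/*
 * The DRIVER_MODULE() declarations above describe the device tree this
 * driver builds: the mskc controller attaches to pci, one msk port
 * device attaches to mskc for each MAC on the chip, and a miibus
 * instance attaches to each msk port to manage its PHY.
 */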

static int      mskc_msi_enable = 0;
static int      mskc_intr_rate = 0;
static int      mskc_process_limit = MSK_PROC_DEFAULT;

TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);
TUNABLE_INT("hw.mskc.msi.enable", &mskc_msi_enable);
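
/*
 * These knobs can be set from loader.conf(5) before the driver loads;
 * the values below are purely illustrative, not recommendations:
 *
 *      hw.mskc.msi.enable="1"          # try MSI instead of legacy INTx
 *      hw.mskc.intr_rate="4000"        # interrupt moderation rate
 *      hw.mskc.process_limit="100"     # status events handled per run
 */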

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
        struct msk_if_softc *sc_if;

        if (phy != PHY_ADDR_MARV)
                return (0);

        sc_if = device_get_softc(dev);

        return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
        struct msk_softc *sc;
        int i, val;

        sc = sc_if->msk_softc;

        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
            GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

        for (i = 0; i < MSK_TIMEOUT; i++) {
                DELAY(1);
                val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
                if ((val & GM_SMI_CT_RD_VAL) != 0) {
                        val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
                        break;
                }
        }

        if (i == MSK_TIMEOUT) {
                if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
                val = 0;
        }

        return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct msk_if_softc *sc_if;

        if (phy != PHY_ADDR_MARV)
                return (0);

        sc_if = device_get_softc(dev);

        return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
        struct msk_softc *sc;
        int i;

        sc = sc_if->msk_softc;

        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
            GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
        for (i = 0; i < MSK_TIMEOUT; i++) {
                DELAY(1);
                if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
                    GM_SMI_CT_BUSY) == 0)
                        break;
        }
        if (i == MSK_TIMEOUT)
                if_printf(sc_if->msk_ifp, "phy write timeout\n");

        return (0);
}
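
/*
 * The PHY accessors above implement the GMAC's SMI (MDIO) interface:
 * a read posts the PHY/register address with the read opcode to
 * GM_SMI_CTRL and polls GM_SMI_CT_RD_VAL before sampling GM_SMI_DATA,
 * while a write loads GM_SMI_DATA first, posts the address and then
 * polls GM_SMI_CT_BUSY.  Both spin in 1us steps for at most
 * MSK_TIMEOUT iterations.
 */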

static void
msk_miibus_statchg(device_t dev)
{
        struct msk_if_softc *sc_if;
        struct msk_softc *sc;
        struct mii_data *mii;
        uint32_t gmac;

        sc_if = device_get_softc(dev);
        sc = sc_if->msk_softc;

        mii = device_get_softc(sc_if->msk_miibus);

        sc_if->msk_link = 0;
        if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
            (IFM_AVALID | IFM_ACTIVE)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc_if->msk_link = 1;
                        break;
                case IFM_1000_T:
                case IFM_1000_SX:
                case IFM_1000_LX:
                case IFM_1000_CX:
                        if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
                                sc_if->msk_link = 1;
                        break;
                }
        }

        if (sc_if->msk_link != 0) {
                /* Enable Tx FIFO underrun and Rx FIFO overrun interrupts. */
                CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
                    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
                /*
                 * Because mii(4) notifies msk(4) when it detects a link
                 * status change, there is no need to enable automatic
                 * speed/flow-control/duplex updates.
                 */
                gmac = GM_GPCR_AU_ALL_DIS;
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_1000_SX:
                case IFM_1000_T:
                        gmac |= GM_GPCR_SPEED_1000;
                        break;
                case IFM_100_TX:
                        gmac |= GM_GPCR_SPEED_100;
                        break;
                case IFM_10_T:
                        break;
                }

                if ((mii->mii_media_active & IFM_GMASK) & IFM_FDX)
                        gmac |= GM_GPCR_DUP_FULL;
                else
                        gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
                /* Disable Rx flow control. */
                if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
                        gmac |= GM_GPCR_FC_RX_DIS;
                /* Disable Tx flow control. */
                if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
                        gmac |= GM_GPCR_FC_TX_DIS;
                gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
                GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
                /* Read back to ensure the write completed. */
                GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

                gmac = GMC_PAUSE_OFF;
                if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) &&
                    ((mii->mii_media_active & IFM_GMASK) & IFM_FDX))
                        gmac = GMC_PAUSE_ON;
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

                /* Enable PHY interrupt for FIFO underrun/overflow. */
                msk_phy_writereg(sc_if, PHY_ADDR_MARV,
                    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
        } else {
                /*
                 * Link state changed to down.
                 * Disable PHY interrupts.
                 */
                msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
                /* Disable Rx/Tx MAC. */
                gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
                if (gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) {
                        gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
                        GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
                        /* Read back to ensure the write completed. */
                        GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
                }
        }
}

static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
        struct msk_softc *sc;
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t mchash[2];
        uint32_t crc;
        uint16_t mode;

        sc = sc_if->msk_softc;
        ifp = sc_if->msk_ifp;

        bzero(mchash, sizeof(mchash));
        mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
        if ((ifp->if_flags & IFF_PROMISC) != 0) {
                mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
        } else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
                mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
                mchash[0] = 0xffff;
                mchash[1] = 0xffff;
        } else {
                mode |= GM_RXCR_UCF_ENA;
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
                            ifma->ifma_addr), ETHER_ADDR_LEN);
                        /* Just want the 6 least significant bits. */
                        crc &= 0x3f;
                        /* Set the corresponding bit in the hash table. */
                        mchash[crc >> 5] |= 1 << (crc & 0x1f);
                }
                if (mchash[0] != 0 || mchash[1] != 0)
                        mode |= GM_RXCR_MCF_ENA;
        }

        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
            mchash[0] & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
            (mchash[0] >> 16) & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
            mchash[1] & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
            (mchash[1] >> 16) & 0xffff);
        GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
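
/*
 * The filter programmed above is a 64-bit multicast hash: the low 6
 * bits of the big-endian CRC32 of each address select one of 64 bits
 * (crc >> 5 picks the 32-bit word, crc & 0x1f the bit within it), and
 * the vector is then written out as four 16-bit slices to
 * GM_MC_ADDR_H1..GM_MC_ADDR_H4.
 */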

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
        struct msk_softc *sc;

        sc = sc_if->msk_softc;
        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
                    RX_VLAN_STRIP_ON);
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
                    TX_VLAN_TAG_ON);
        } else {
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
                    RX_VLAN_STRIP_OFF);
                CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
                    TX_VLAN_TAG_OFF);
        }
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
        struct msk_ring_data *rd;
        struct msk_rxdesc *rxd;
        int i, prod;

        sc_if->msk_cdata.msk_rx_cons = 0;
        sc_if->msk_cdata.msk_rx_prod = 0;
        sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

        rd = &sc_if->msk_rdata;
        bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
        prod = sc_if->msk_cdata.msk_rx_prod;
        for (i = 0; i < MSK_RX_RING_CNT; i++) {
                rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_rx_ring[prod];
                if (msk_newbuf(sc_if, prod, 1) != 0)
                        return (ENOBUFS);
                MSK_INC(prod, MSK_RX_RING_CNT);
        }

        /* Update prefetch unit. */
        sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
        CSR_WRITE_2(sc_if->msk_softc,
            Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
            sc_if->msk_cdata.msk_rx_prod);

        return (0);
}
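
/*
 * Note: writing the producer index to the prefetch unit's put-index
 * register tells the hardware how far it may fetch receive
 * descriptors; the index is set to MSK_RX_RING_CNT - 1 rather than
 * wrapping to 0, leaving one slot in reserve (the usual full/empty
 * guard for hardware rings).
 */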

#ifdef MSK_JUMBO
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
        struct msk_ring_data *rd;
        struct msk_rxdesc *rxd;
        int i, prod;

        MSK_IF_LOCK_ASSERT(sc_if);

        sc_if->msk_cdata.msk_rx_cons = 0;
        sc_if->msk_cdata.msk_rx_prod = 0;
        sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

        rd = &sc_if->msk_rdata;
        bzero(rd->msk_jumbo_rx_ring,
            sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
        prod = sc_if->msk_cdata.msk_rx_prod;
        for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
                rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
                rxd->rx_m = NULL;
                rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
                if (msk_jumbo_newbuf(sc_if, prod) != 0)
                        return (ENOBUFS);
                MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
        }

        bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
            sc_if->msk_cdata.msk_jumbo_rx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
        CSR_WRITE_2(sc_if->msk_softc,
            Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
            sc_if->msk_cdata.msk_rx_prod);

        return (0);
}
#endif

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
        struct msk_ring_data *rd;
        struct msk_txdesc *txd;
        int i;

        sc_if->msk_cdata.msk_tx_prod = 0;
        sc_if->msk_cdata.msk_tx_cons = 0;
        sc_if->msk_cdata.msk_tx_cnt = 0;

        rd = &sc_if->msk_rdata;
        bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
        for (i = 0; i < MSK_TX_RING_CNT; i++) {
                txd = &sc_if->msk_cdata.msk_txdesc[i];
                txd->tx_m = NULL;
                txd->tx_le = &rd->msk_tx_ring[i];
        }
}

static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;

        rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
        m = rxd->rx_m;
        rx_le = rxd->rx_le;
        rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
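
/*
 * msk_discard_rxbuf() re-arms a descriptor with the mbuf it already
 * holds; the receive path uses it to recycle a buffer in place when a
 * frame arrives with errors or a replacement mbuf cannot be allocated,
 * avoiding a free/allocate cycle.
 */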

#ifdef MSK_JUMBO
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;

        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
        m = rxd->rx_m;
        rx_le = rxd->rx_le;
        rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
#endif

static int
msk_newbuf(struct msk_if_softc *sc_if, int idx, int init)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;
        bus_dma_segment_t seg;
        bus_dmamap_t map;
        int error, nseg;

        m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);

        m->m_len = m->m_pkthdr.len = MCLBYTES;
        if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
                m_adj(m, ETHER_ALIGN);

        error = bus_dmamap_load_mbuf_segment(sc_if->msk_cdata.msk_rx_tag,
                        sc_if->msk_cdata.msk_rx_sparemap,
                        m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
        if (error) {
                m_freem(m);
                if (init)
                        if_printf(&sc_if->arpcom.ac_if, "can't load RX mbuf\n");
                return (error);
        }

        rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
        }

        map = rxd->rx_dmamap;
        rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
        sc_if->msk_cdata.msk_rx_sparemap = map;

        rxd->rx_m = m;
        rx_le = rxd->rx_le;
        rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
        rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER);

        return (0);
}
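
/*
 * Note the spare-map swap above: the new mbuf is loaded into
 * msk_rx_sparemap first, so a failed DMA load leaves the descriptor's
 * current mbuf mapped and untouched; only after a successful load are
 * the descriptor's map and the spare map exchanged.
 */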

#ifdef MSK_JUMBO
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
        struct msk_rx_desc *rx_le;
        struct msk_rxdesc *rxd;
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        bus_dmamap_t map;
        int nsegs;
        void *buf;

        MGETHDR(m, MB_DONTWAIT, MT_DATA);
        if (m == NULL)
                return (ENOBUFS);
        buf = msk_jalloc(sc_if);
        if (buf == NULL) {
                m_freem(m);
                return (ENOBUFS);
        }
        /* Attach the buffer to the mbuf. */
        MEXTADD(m, buf, MSK_JLEN, msk_jfree, sc_if, 0, EXT_NET_DRV);
        if ((m->m_flags & M_EXT) == 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        m->m_pkthdr.len = m->m_len = MSK_JLEN;
        m_adj(m, ETHER_ALIGN);

        if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
            sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
            BUS_DMA_NOWAIT) != 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
                    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
                    rxd->rx_dmamap);
        }
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
        sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
        bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_PREREAD);
        rxd->rx_m = m;
        rx_le = rxd->rx_le;
        rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
        rx_le->msk_control =
            htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

        return (0);
}
#endif

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
        struct msk_if_softc *sc_if = ifp->if_softc;
        struct mii_data *mii;
        int error;

        mii = device_get_softc(sc_if->msk_miibus);
        error = mii_mediachg(mii);

        return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct msk_if_softc *sc_if = ifp->if_softc;
        struct mii_data *mii;

        mii = device_get_softc(sc_if->msk_miibus);
        mii_pollstat(mii);

        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
        struct msk_if_softc *sc_if;
        struct ifreq *ifr;
        struct mii_data *mii;
        int error, mask;

        sc_if = ifp->if_softc;
        ifr = (struct ifreq *)data;
        error = 0;

        switch(command) {
        case SIOCSIFMTU:
#ifdef MSK_JUMBO
                if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
                        error = EINVAL;
                        break;
                }
                if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
                    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
                        error = EINVAL;
                        break;
                }
                ifp->if_mtu = ifr->ifr_mtu;
                if ((ifp->if_flags & IFF_RUNNING) != 0)
                        msk_init(sc_if);
#else
                error = EOPNOTSUPP;
#endif
                break;

        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_flags & IFF_RUNNING) {
                                if (((ifp->if_flags ^ sc_if->msk_if_flags)
                                    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                                        msk_rxfilter(sc_if);
                        } else {
                                if (sc_if->msk_detach == 0)
                                        msk_init(sc_if);
                        }
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                msk_stop(sc_if);
                }
                sc_if->msk_if_flags = ifp->if_flags;
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_flags & IFF_RUNNING)
                        msk_rxfilter(sc_if);
                break;

        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                mii = device_get_softc(sc_if->msk_miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
                break;

        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if ((mask & IFCAP_TXCSUM) != 0) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
                            (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
                                ifp->if_hwassist |= MSK_CSUM_FEATURES;
                        else
                                ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
                }
#ifdef notyet
                if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        msk_setvlan(sc_if, ifp);
                }
#endif

                if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
                    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
                        /*
                         * On Yukon EC Ultra, TSO & checksum offload are not
                         * supported for jumbo frames.
                         */
                        ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                }
                break;

        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}
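
/*
 * These ioctls are normally reached via ifconfig(8); for example
 * (illustrative commands):
 *
 *      ifconfig msk0 promisc           -> SIOCSIFFLAGS, msk_rxfilter()
 *      ifconfig msk0 txcsum            -> SIOCSIFCAP, IFCAP_TXCSUM
 *      ifconfig msk0 media autoselect  -> SIOCSIFMEDIA, ifmedia_ioctl()
 */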

static int
mskc_probe(device_t dev)
{
        const struct msk_product *mp;
        uint16_t vendor, devid;

        vendor = pci_get_vendor(dev);
        devid = pci_get_device(dev);
        for (mp = msk_products; mp->msk_name != NULL; ++mp) {
                if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
                        device_set_desc(dev, mp->msk_name);
                        return (0);
                }
        }
        return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
        int next;
        int i;

        /* Get adapter SRAM size. */
        sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
        if (bootverbose) {
                device_printf(sc->msk_dev,
                    "RAM buffer size : %dKB\n", sc->msk_ramsize);
        }
        if (sc->msk_ramsize == 0)
                return (0);
        sc->msk_pflags |= MSK_FLAG_RAMBUF;

        /*
         * Give the receiver 2/3 of the memory and round down to a
         * multiple of 1024.  The Tx/Rx RAM buffer sizes of the Yukon II
         * should be multiples of 1024.
         */
        sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
        sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
        for (i = 0, next = 0; i < sc->msk_num_port; i++) {
                sc->msk_rxqstart[i] = next;
                sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
                next = sc->msk_rxqend[i] + 1;
                sc->msk_txqstart[i] = next;
                sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
                next = sc->msk_txqend[i] + 1;
                if (bootverbose) {
                        device_printf(sc->msk_dev,
                            "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
                            sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
                            sc->msk_rxqend[i]);
                        device_printf(sc->msk_dev,
                            "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
                            sc->msk_txqsize / 1024, sc->msk_txqstart[i],
                            sc->msk_txqend[i]);
                }
        }

        return (0);
}
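
/*
 * Worked example (illustrative): a chip reporting 48KB of SRAM gets
 * msk_rxqsize = rounddown(48 * 1024 * 2 / 3, 1024) = 32768 bytes and
 * msk_txqsize = 48 * 1024 - 32768 = 16384 bytes, so port 0 would use
 * 0x00000000-0x00007fff for its Rx queue and 0x00008000-0x0000bfff
 * for its Tx queue.
 */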

static void
mskc_phy_power(struct msk_softc *sc, int mode)
{
        uint32_t our, val;
        int i;

        switch (mode) {
        case MSK_PHY_POWERUP:
                /* Switch power to VCC (WA for VAUX problem). */
                CSR_WRITE_1(sc, B0_POWER_CTRL,
                    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
                /* Disable Core Clock Division, set Clock Select to 0. */
                CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

                val = 0;
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
                    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                        /* Enable bits are inverted. */
                        val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
                              Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
                              Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
                }
                /*
                 * Enable PCI & Core Clock, enable clock gating for both Links.
                 */
                CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

                our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
                our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
                        if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                                /* Deassert Low Power for 1st PHY. */
                                our |= PCI_Y2_PHY1_COMA;
                                if (sc->msk_num_port > 1)
                                        our |= PCI_Y2_PHY2_COMA;
                        }
                }
                if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
                    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
                    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
                        val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
                        val &= (PCI_FORCE_ASPM_REQUEST |
                            PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
                            PCI_ASPM_CLKRUN_REQUEST);
                        /* Set all bits to 0 except bits 15..12. */
                        CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
                        val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
                        val &= PCI_CTL_TIM_VMAIN_AV_MSK;
                        CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
                        CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
                        CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
                        /*
                         * Disable status race, workaround for
                         * Yukon EC Ultra & Yukon EX.
                         */
                        val = CSR_READ_4(sc, B2_GP_IO);
                        val |= GLB_GPIO_STAT_RACE_DIS;
                        CSR_WRITE_4(sc, B2_GP_IO, val);
                        CSR_READ_4(sc, B2_GP_IO);
                }
                /* Release PHY from PowerDown/COMA mode. */
                CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);

                for (i = 0; i < sc->msk_num_port; i++) {
                        CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
                            GMLC_RST_SET);
                        CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
                            GMLC_RST_CLR);
                }
                break;
        case MSK_PHY_POWERDOWN:
                val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
                val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
                    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                        val &= ~PCI_Y2_PHY1_COMA;
                        if (sc->msk_num_port > 1)
                                val &= ~PCI_Y2_PHY2_COMA;
                }
                CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);

                val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
                      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
                      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
                if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
                    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
                        /* Enable bits are inverted. */
                        val = 0;
                }
                /*
                 * Disable PCI & Core Clock, disable clock gating for
                 * both Links.
                 */
                CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
                CSR_WRITE_1(sc, B0_POWER_CTRL,
                    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
                break;
        default:
                break;
        }
}
 1115 
 1116 static void
 1117 mskc_reset(struct msk_softc *sc)
 1118 {
 1119         bus_addr_t addr;
 1120         uint16_t status;
 1121         uint32_t val;
 1122         int i;
 1123 
 1124         /* Disable ASF. */
 1125         if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
 1126             sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
 1127                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1128                     sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 1129                         CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
 1130                         status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
 1131                         /* Clear AHB bridge & microcontroller reset. */
 1132                         status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
 1133                             Y2_ASF_HCU_CCSR_CPU_RST_MODE);
 1134                         /* Clear ASF microcontroller state. */
 1135                         status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
 1136                         status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
 1137                         CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
 1138                         CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
 1139                 } else {
 1140                         CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
 1141                 }
 1142                 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
 1143                 /*
 1144                  * Since we disabled ASF, S/W reset is required for
 1145                  * Power Management.
 1146                  */
 1147                 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 1148                 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1149         }
 1150 
 1151         /* Clear all error bits in the PCI status register. */
 1152         status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 1153         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1154 
 1155         pci_write_config(sc->msk_dev, PCIR_STATUS, status |
 1156             PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 1157             PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
 1158         CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
 1159 
 1160         switch (sc->msk_bustype) {
 1161         case MSK_PEX_BUS:
 1162                 /* Clear all PEX errors. */
 1163                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 1164                 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 1165                 if ((val & PEX_RX_OV) != 0) {
 1166                         sc->msk_intrmask &= ~Y2_IS_HW_ERR;
 1167                         sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 1168                 }
 1169                 break;
 1170         case MSK_PCI_BUS:
 1171         case MSK_PCIX_BUS:
 1172                 /* Set Cache Line Size to 2(8bytes) if configured to 0. */
 1173                 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
 1174                 if (val == 0)
 1175                         pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
 1176                 if (sc->msk_bustype == MSK_PCIX_BUS) {
 1177                         /* Set Cache Line Size opt. */
 1178                         val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
 1179                         val |= PCI_CLS_OPT;
 1180                         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
 1181                 }
 1182                 break;
 1183         }
 1184         /* Set PHY power state. */
 1185         mskc_phy_power(sc, MSK_PHY_POWERUP);
 1186 
 1187         /* Reset GPHY/GMAC Control */
 1188         for (i = 0; i < sc->msk_num_port; i++) {
 1189                 /* GPHY Control reset. */
 1190                 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
 1191                 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
 1192                 /* GMAC Control reset. */
 1193                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
 1194                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
 1195                 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
 1196                 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 1197                     sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 1198                         CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
 1199                             GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 1200                             GMC_BYP_RETR_ON);
 1201                 }
 1202         }
 1203 
 1204         if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
 1205             sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
 1206                 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
 1207         if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
 1208                 /* Disable PCIe PHY powerdown(reg 0x80, bit7). */
 1209                 CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
 1210         }
 1211         CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1212 
 1213         /* LED On. */
 1214         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
 1215 
 1216         /* Clear TWSI IRQ. */
 1217         CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
 1218 
 1219         /* Turn off hardware timer. */
 1220         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
 1221         CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
 1222 
 1223         /* Turn off descriptor polling. */
 1224         CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
 1225 
 1226         /* Turn off time stamps. */
 1227         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
 1228         CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 1229 
 1230         if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
 1231             sc->msk_hw_id == CHIP_ID_YUKON_EC ||
 1232             sc->msk_hw_id == CHIP_ID_YUKON_FE) {
 1233                 /* Configure timeout values. */
 1234                 for (i = 0; i < sc->msk_num_port; i++) {
 1235                         CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
 1236                             RI_RST_SET);
 1237                         CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
 1238                             RI_RST_CLR);
 1239                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
 1240                             MSK_RI_TO_53);
 1241                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
 1242                             MSK_RI_TO_53);
 1243                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
 1244                             MSK_RI_TO_53);
 1245                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
 1246                             MSK_RI_TO_53);
 1247                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
 1248                             MSK_RI_TO_53);
 1249                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
 1250                             MSK_RI_TO_53);
 1251                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
 1252                             MSK_RI_TO_53);
 1253                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
 1254                             MSK_RI_TO_53);
 1255                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
 1256                             MSK_RI_TO_53);
 1257                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
 1258                             MSK_RI_TO_53);
 1259                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
 1260                             MSK_RI_TO_53);
 1261                         CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
 1262                             MSK_RI_TO_53);
 1263                 }
 1264         }
 1265 
 1266         /* Disable all interrupts. */
 1267         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 1268         CSR_READ_4(sc, B0_HWE_IMSK);
 1269         CSR_WRITE_4(sc, B0_IMSK, 0);
 1270         CSR_READ_4(sc, B0_IMSK);
 1271 
 1272         /*
 1273          * On dual port PCI-X cards, there is a problem where status
 1274          * can be received out of order due to split transactions.
 1275          */
 1276         if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
 1277                 uint16_t pcix_cmd;
 1278 
 1279                 pcix_cmd = pci_read_config(sc->msk_dev,
 1280                     sc->msk_pcixcap + PCIXR_COMMAND, 2);
 1281                 /* Clear Max Outstanding Split Transactions. */
 1282                 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
 1283                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 1284                 pci_write_config(sc->msk_dev,
 1285                     sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
 1286                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 1287         }
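              /*
               * Editorial note: a zero max-splits field (cleared above) limits
               * the device to a single outstanding split transaction, which
               * serializes status delivery at some cost in PCI-X bus
               * efficiency.
               */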
 1288         if (sc->msk_pciecap != 0) {
 1289                 /* Change Max. Read Request Size to 2048 bytes. */
 1290                 if (pcie_get_max_readrq(sc->msk_dev) ==
 1291                     PCIEM_DEVCTL_MAX_READRQ_512) {
 1292                         pcie_set_max_readrq(sc->msk_dev,
 1293                             PCIEM_DEVCTL_MAX_READRQ_2048);
 1294                 }
 1295         }
 1296 
 1297         /* Clear status list. */
 1298         bzero(sc->msk_stat_ring,
 1299             sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
 1300         sc->msk_stat_cons = 0;
 1301         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
 1302         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
 1303         /* Set the status list base address. */
 1304         addr = sc->msk_stat_ring_paddr;
 1305         CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
 1306         CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
 1307         /* Set the status list last index. */
 1308         CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
 1309         if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
 1310             sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
 1311                 /* WA for dev. #4.3 */
 1312                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
 1313                 /* WA for dev. #4.18 */
 1314                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
 1315                 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
 1316         } else {
 1317                 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
 1318                 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
 1319                 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
 1320                     sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
 1321                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
 1322                 else
 1323                         CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
 1324                 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
 1325         }
 1326         /*
 1327          * Use the default values for STAT_ISR_TIMER_INI and STAT_LEV_TIMER_INI.
 1328          */
 1329         CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
 1330 
 1331         /* Enable status unit. */
 1332         CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
 1333 
 1334         CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
 1335         CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
 1336         CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
 1337 }
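      /*
       * Editorial sketch: the timer initial values above are expressed in
       * core clock ticks.  Assuming the usual Yukon II definition of
       * MSK_USECS(), roughly ((sc)->msk_clock * (us)) with msk_clock in MHz
       * (see mskc_attach() below), the 1000us TX timer works out to, e.g.:
       *
       *	Yukon EC @ 125 MHz: MSK_USECS(sc, 1000) = 125 * 1000 = 125000 ticks
       */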
 1338 
 1339 static int
 1340 msk_probe(device_t dev)
 1341 {
 1342         struct msk_softc *sc = device_get_softc(device_get_parent(dev));
 1343         char desc[100];
 1344 
 1345         /*
 1346          * Not much to do here. We always know there will be
 1347          * at least one GMAC present, and if there are two,
 1348          * mskc_attach() will create a second device instance
 1349          * for us.
 1350          */
 1351         ksnprintf(desc, sizeof(desc),
 1352             "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
 1353             model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
 1354             sc->msk_hw_rev);
 1355         device_set_desc_copy(dev, desc);
 1356 
 1357         return (0);
 1358 }
 1359 
 1360 static int
 1361 msk_attach(device_t dev)
 1362 {
 1363         struct msk_softc *sc = device_get_softc(device_get_parent(dev));
 1364         struct msk_if_softc *sc_if = device_get_softc(dev);
 1365         struct ifnet *ifp = &sc_if->arpcom.ac_if;
 1366         int i, port, error;
 1367         uint8_t eaddr[ETHER_ADDR_LEN];
 1368 
 1369         port = *(int *)device_get_ivars(dev);
 1370         KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);
 1371 
 1372         kfree(device_get_ivars(dev), M_DEVBUF);
 1373         device_set_ivars(dev, NULL);
 1374 
 1375         callout_init(&sc_if->msk_tick_ch);
 1376         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1377 
 1378         sc_if->msk_if_dev = dev;
 1379         sc_if->msk_port = port;
 1380         sc_if->msk_softc = sc;
 1381         sc_if->msk_ifp = ifp;
 1382         sc_if->msk_flags = sc->msk_pflags;
 1383         sc->msk_if[port] = sc_if;
 1384 
 1385         /* Setup Tx/Rx queue register offsets. */
 1386         if (port == MSK_PORT_A) {
 1387                 sc_if->msk_txq = Q_XA1;
 1388                 sc_if->msk_txsq = Q_XS1;
 1389                 sc_if->msk_rxq = Q_R1;
 1390         } else {
 1391                 sc_if->msk_txq = Q_XA2;
 1392                 sc_if->msk_txsq = Q_XS2;
 1393                 sc_if->msk_rxq = Q_R2;
 1394         }
 1395 
 1396         error = msk_txrx_dma_alloc(sc_if);
 1397         if (error)
 1398                 goto fail;
 1399 
 1400         ifp->if_softc = sc_if;
 1401         ifp->if_mtu = ETHERMTU;
 1402         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 1403         ifp->if_init = msk_init;
 1404         ifp->if_ioctl = msk_ioctl;
 1405         ifp->if_start = msk_start;
 1406         ifp->if_watchdog = msk_watchdog;
 1407         ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
 1408         ifq_set_ready(&ifp->if_snd);
 1409 
 1410 #ifdef notyet
 1411         /*
 1412          * The IFCAP_RXCSUM capability is intentionally disabled, as the
 1413          * hardware has a serious bug in Rx checksum offload across the
 1414          * entire Yukon II family. There seems to be a workaround that makes
 1415          * it work some of the time, but the workaround also has to check OP
 1416          * code sequences to verify that the OP code is correct, and
 1417          * sometimes it must compute the IP/TCP/UDP checksum in the driver
 1418          * to verify the checksum computed by the hardware. If you have to
 1419          * compute a checksum in software to verify the hardware's checksum,
 1420          * why have the hardware compute it at all? There is no reason to
 1421          * spend time making Rx checksum offload work on Yukon II hardware.
 1422          */
 1423         ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
 1424                                IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
 1425         ifp->if_hwassist = MSK_CSUM_FEATURES;
 1426         ifp->if_capenable = ifp->if_capabilities;
 1427 #endif
 1428 
 1429         /*
 1430          * Get station address for this interface. Note that
 1431          * dual port cards actually come with three station
 1432          * addresses: one for each port, plus an extra. The
 1433          * extra one is used by the SysKonnect driver software
 1434          * as a 'virtual' station address for when both ports
 1435          * are operating in failover mode. Currently we don't
 1436          * use this extra address.
 1437          */
 1438         for (i = 0; i < ETHER_ADDR_LEN; i++)
 1439                 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
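              /*
               * Editorial note: the station addresses are stored with an
               * 8-byte stride per port starting at B2_MAC_1, so port A reads
               * bytes 0-5 and port B bytes 8-13, even though an Ethernet
               * address is only ETHER_ADDR_LEN (6) bytes long.
               */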
 1440 
 1441         sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
 1442 
 1443         /*
 1444          * Do miibus setup.
 1445          */
 1446         error = mii_phy_probe(dev, &sc_if->msk_miibus,
 1447                               msk_mediachange, msk_mediastatus);
 1448         if (error) {
 1449                 device_printf(sc_if->msk_if_dev, "no PHY found!\n");
 1450                 goto fail;
 1451         }
 1452 
 1453         /*
 1454          * Call MI attach routine.  Can't hold locks when calling into ether_*.
 1455          */
 1456         ether_ifattach(ifp, eaddr, &sc->msk_serializer);
 1457 #if 0
 1458         /*
 1459          * Tell the upper layer(s) we support long frames.
 1460          * Must appear after the call to ether_ifattach() because
 1461          * ether_ifattach() sets ifi_hdrlen to the default value.
 1462          */
 1463         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 1464 #endif
 1465 
 1466         return 0;
 1467 fail:
 1468         msk_detach(dev);
 1469         sc->msk_if[port] = NULL;
 1470         return (error);
 1471 }
 1472 
 1473 /*
 1474  * Attach the interface. Allocate softc structures, do ifmedia
 1475  * setup and ethernet/BPF attach.
 1476  */
 1477 static int
 1478 mskc_attach(device_t dev)
 1479 {
 1480         struct msk_softc *sc;
 1481         int error, *port, cpuid;
 1482         u_int irq_flags;
 1483 
 1484         sc = device_get_softc(dev);
 1485         sc->msk_dev = dev;
 1486         lwkt_serialize_init(&sc->msk_serializer);
 1487 
 1488         /*
 1489          * Initialize sysctl variables
 1490          */
 1491         sc->msk_process_limit = mskc_process_limit;
 1492         sc->msk_intr_rate = mskc_intr_rate;
 1493 
 1494 #ifndef BURN_BRIDGES
 1495         /*
 1496          * Handle power management nonsense.
 1497          */
 1498         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
 1499                 uint32_t irq, bar0, bar1;
 1500 
 1501                 /* Save important PCI config data. */
 1502                 bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
 1503                 bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
 1504                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
 1505 
 1506                 /* Reset the power state. */
 1507                 device_printf(dev, "chip is in D%d power mode "
 1508                               "-- setting to D0\n", pci_get_powerstate(dev));
 1509 
 1510                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
 1511 
 1512                 /* Restore PCI config data. */
 1513                 pci_write_config(dev, PCIR_BAR(0), bar0, 4);
 1514                 pci_write_config(dev, PCIR_BAR(1), bar1, 4);
 1515                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
 1516         }
 1517 #endif  /* BURN_BRIDGES */
 1518 
 1519         /*
 1520          * Map control/status registers.
 1521          */
 1522         pci_enable_busmaster(dev);
 1523 
 1524         /*
 1525          * Allocate I/O resource
 1526          */
 1527 #ifdef MSK_USEIOSPACE
 1528         sc->msk_res_type = SYS_RES_IOPORT;
 1529         sc->msk_res_rid = PCIR_BAR(1);
 1530 #else
 1531         sc->msk_res_type = SYS_RES_MEMORY;
 1532         sc->msk_res_rid = PCIR_BAR(0);
 1533 #endif
 1534         sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
 1535                                              &sc->msk_res_rid, RF_ACTIVE);
 1536         if (sc->msk_res == NULL) {
 1537                 if (sc->msk_res_type == SYS_RES_MEMORY) {
 1538                         sc->msk_res_type = SYS_RES_IOPORT;
 1539                         sc->msk_res_rid = PCIR_BAR(1);
 1540                 } else {
 1541                         sc->msk_res_type = SYS_RES_MEMORY;
 1542                         sc->msk_res_rid = PCIR_BAR(0);
 1543                 }
 1544                 sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
 1545                                                      &sc->msk_res_rid,
 1546                                                      RF_ACTIVE);
 1547                 if (sc->msk_res == NULL) {
 1548                         device_printf(dev, "couldn't allocate %s resources\n",
 1549                         sc->msk_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
 1550                         return (ENXIO);
 1551                 }
 1552         }
 1553         sc->msk_res_bt = rman_get_bustag(sc->msk_res);
 1554         sc->msk_res_bh = rman_get_bushandle(sc->msk_res);
 1555 
 1556         /*
 1557          * Allocate IRQ
 1558          */
 1559         sc->msk_irq_type = pci_alloc_1intr(dev, mskc_msi_enable,
 1560             &sc->msk_irq_rid, &irq_flags);
 1561 
 1562         sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->msk_irq_rid,
 1563             irq_flags);
 1564         if (sc->msk_irq == NULL) {
 1565                 device_printf(dev, "couldn't allocate IRQ resources\n");
 1566                 error = ENXIO;
 1567                 goto fail;
 1568         }
 1569 
 1570         /* Enable all clocks before accessing any registers. */
 1571         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
 1572 
 1573         CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1574         sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
 1575         sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
 1576         /* Bail out if chip is not recognized. */
 1577         if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
 1578             sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
 1579             sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
 1580                 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
 1581                     sc->msk_hw_id, sc->msk_hw_rev);
 1582                 error = ENXIO;
 1583                 goto fail;
 1584         }
 1585 
 1586         /*
 1587          * Create sysctl tree
 1588          */
 1589         sysctl_ctx_init(&sc->msk_sysctl_ctx);
 1590         sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx,
 1591                                               SYSCTL_STATIC_CHILDREN(_hw),
 1592                                               OID_AUTO,
 1593                                               device_get_nameunit(dev),
 1594                                               CTLFLAG_RD, 0, "");
 1595         if (sc->msk_sysctl_tree == NULL) {
 1596                 device_printf(dev, "can't add sysctl node\n");
 1597                 error = ENXIO;
 1598                 goto fail;
 1599         }
 1600 
 1601         SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
 1602                         SYSCTL_CHILDREN(sc->msk_sysctl_tree),
 1603                         OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 1604                         &sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
 1605                         "I", "max number of Rx events to process");
 1606         SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
 1607                         SYSCTL_CHILDREN(sc->msk_sysctl_tree),
 1608                         OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
 1609                         sc, 0, mskc_sysctl_intr_rate,
 1610                         "I", "max number of interrupts per second");
 1611         SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
 1612                        SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
 1613                        "defrag_avoided", CTLFLAG_RW, &sc->msk_defrag_avoided,
 1614                        0, "# of avoided m_defrag on TX path");
 1615         SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
 1616                        SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
 1617                        "leading_copied", CTLFLAG_RW, &sc->msk_leading_copied,
 1618                        0, "# of leading copies on TX path");
 1619         SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
 1620                        SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
 1621                        "trailing_copied", CTLFLAG_RW, &sc->msk_trailing_copied,
 1622                        0, "# of trailing copies on TX path");
 1623 
 1624         sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
 1625         if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
 1626                 sc->msk_coppertype = 0;
 1627         else
 1628                 sc->msk_coppertype = 1;
 1629         /* Check number of MACs. */
 1630         sc->msk_num_port = 1;
 1631         if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
 1632             CFG_DUAL_MAC_MSK) {
 1633                 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
 1634                         sc->msk_num_port++;
 1635         }
 1636 
 1637         /* Check bus type. */
 1638         if (pci_is_pcie(sc->msk_dev) == 0) {
 1639                 sc->msk_bustype = MSK_PEX_BUS;
 1640                 sc->msk_pciecap = pci_get_pciecap_ptr(sc->msk_dev);
 1641         } else if (pci_is_pcix(sc->msk_dev) == 0) {
 1642                 sc->msk_bustype = MSK_PCIX_BUS;
 1643                 sc->msk_pcixcap = pci_get_pcixcap_ptr(sc->msk_dev);
 1644         } else {
 1645                 sc->msk_bustype = MSK_PCI_BUS;
 1646         }
 1647 
 1648         switch (sc->msk_hw_id) {
 1649         case CHIP_ID_YUKON_EC:
 1650         case CHIP_ID_YUKON_EC_U:
 1651                 sc->msk_clock = 125;    /* 125 MHz */
 1652                 break;
 1653         case CHIP_ID_YUKON_EX:
 1654                 sc->msk_clock = 125;    /* 125 MHz */
 1655                 break;
 1656         case CHIP_ID_YUKON_FE:
 1657                 sc->msk_clock = 100;    /* 100 MHz */
 1658                 sc->msk_pflags |= MSK_FLAG_FASTETHER;
 1659                 break;
 1660         case CHIP_ID_YUKON_FE_P:
 1661                 sc->msk_clock = 50;     /* 50 MHz */
 1662                 /* DESCV2 */
 1663                 sc->msk_pflags |= MSK_FLAG_FASTETHER;
 1664                 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
 1665                         /*
 1666                          * XXX
 1667                          * FE+ A0 has a status LE writeback bug, so msk(4)
 1668                          * does not rely on the status word of received
 1669                          * frames in msk_rxeof(). This in turn disables all
 1670                          * hardware assistance bits reported by the status
 1671                          * word, as well as validity checking of the
 1672                          * received frame. Just pass received frames to the
 1673                          * upper stack with minimal tests and let it handle them.
 1674                          */
 1675                         sc->msk_pflags |= MSK_FLAG_NORXCHK;
 1676                 }
 1677                 break;
 1678         case CHIP_ID_YUKON_XL:
 1679                 sc->msk_clock = 156;    /* 156 MHz */
 1680                 break;
 1681         case CHIP_ID_YUKON_SUPR:
 1682                 sc->msk_clock = 125;    /* 125 MHz */
 1683                 break;
 1684         case CHIP_ID_YUKON_UL_2:
 1685                 sc->msk_clock = 125;    /* 125 MHz */
 1686                 break;
 1687         case CHIP_ID_YUKON_OPT:
 1688                 sc->msk_clock = 125;    /* 125 MHz */
 1689                 break;
 1690         default:
 1691                 sc->msk_clock = 156;    /* 156 MHz */
 1692                 break;
 1693         }
 1694 
 1695         error = mskc_status_dma_alloc(sc);
 1696         if (error)
 1697                 goto fail;
 1698 
 1699         /* Set base interrupt mask. */
 1700         sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
 1701         sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
 1702             Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
 1703 
 1704         /* Reset the adapter. */
 1705         mskc_reset(sc);
 1706 
 1707         error = mskc_setup_rambuffer(sc);
 1708         if (error)
 1709                 goto fail;
 1710 
 1711         sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
 1712         if (sc->msk_devs[MSK_PORT_A] == NULL) {
 1713                 device_printf(dev, "failed to add child for PORT_A\n");
 1714                 error = ENXIO;
 1715                 goto fail;
 1716         }
 1717         port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
 1718         *port = MSK_PORT_A;
 1719         device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
 1720 
 1721         if (sc->msk_num_port > 1) {
 1722                 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
 1723                 if (sc->msk_devs[MSK_PORT_B] == NULL) {
 1724                         device_printf(dev, "failed to add child for PORT_B\n");
 1725                         error = ENXIO;
 1726                         goto fail;
 1727                 }
 1728                 port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
 1729                 *port = MSK_PORT_B;
 1730                 device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
 1731         }
 1732 
 1733         bus_generic_attach(dev);
 1734 
 1735         cpuid = rman_get_cpuid(sc->msk_irq);
 1736         if (sc->msk_if[0] != NULL)
 1737                 ifq_set_cpuid(&sc->msk_if[0]->msk_ifp->if_snd, cpuid);
 1738         if (sc->msk_if[1] != NULL)
 1739                 ifq_set_cpuid(&sc->msk_if[1]->msk_ifp->if_snd, cpuid);
 1740 
 1741         error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
 1742                                mskc_intr, sc, &sc->msk_intrhand,
 1743                                &sc->msk_serializer);
 1744         if (error) {
 1745                 device_printf(dev, "couldn't set up interrupt handler\n");
 1746                 goto fail;
 1747         }
 1748         return 0;
 1749 fail:
 1750         mskc_detach(dev);
 1751         return (error);
 1752 }
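      /*
       * Editorial note on the ivars handoff above: each child receives a
       * kmalloc()ed port number through device_set_ivars(); msk_attach()
       * consumes and kfree()s it, so mskc_detach() only has to free the
       * ivars of children whose msk_attach() never ran.
       */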
 1753 
 1754 /*
 1755  * Shut down the hardware and free up resources. This can be called at
 1756  * any time after the serializer has been initialized. It is called in both
 1757  * the error case in attach and the normal detach case so it needs
 1758  * to be careful about only freeing resources that have actually been
 1759  * allocated.
 1760  */
 1761 static int
 1762 msk_detach(device_t dev)
 1763 {
 1764         struct msk_if_softc *sc_if = device_get_softc(dev);
 1765 
 1766         if (device_is_attached(dev)) {
 1767                 struct msk_softc *sc = sc_if->msk_softc;
 1768                 struct ifnet *ifp = &sc_if->arpcom.ac_if;
 1769 
 1770                 lwkt_serialize_enter(ifp->if_serializer);
 1771 
 1772                 if (sc->msk_intrhand != NULL) {
 1773                         if (sc->msk_if[MSK_PORT_A] != NULL)
 1774                                 msk_stop(sc->msk_if[MSK_PORT_A]);
 1775                         if (sc->msk_if[MSK_PORT_B] != NULL)
 1776                                 msk_stop(sc->msk_if[MSK_PORT_B]);
 1777 
 1778                         bus_teardown_intr(sc->msk_dev, sc->msk_irq,
 1779                                           sc->msk_intrhand);
 1780                         sc->msk_intrhand = NULL;
 1781                 }
 1782 
 1783                 lwkt_serialize_exit(ifp->if_serializer);
 1784 
 1785                 ether_ifdetach(ifp);
 1786         }
 1787 
 1788         if (sc_if->msk_miibus != NULL)
 1789                 device_delete_child(dev, sc_if->msk_miibus);
 1790 
 1791         msk_txrx_dma_free(sc_if);
 1792         return (0);
 1793 }
 1794 
 1795 static int
 1796 mskc_detach(device_t dev)
 1797 {
 1798         struct msk_softc *sc = device_get_softc(dev);
 1799         int *port, i;
 1800 
 1801 #ifdef INVARIANTS
 1802         if (device_is_attached(dev)) {
 1803                 KASSERT(sc->msk_intrhand == NULL,
 1804                         ("intr is not torn down yet"));
 1805         }
 1806 #endif
 1807 
 1808         for (i = 0; i < sc->msk_num_port; ++i) {
 1809                 if (sc->msk_devs[i] != NULL) {
 1810                         port = device_get_ivars(sc->msk_devs[i]);
 1811                         if (port != NULL) {
 1812                                 kfree(port, M_DEVBUF);
 1813                                 device_set_ivars(sc->msk_devs[i], NULL);
 1814                         }
 1815                         device_delete_child(dev, sc->msk_devs[i]);
 1816                 }
 1817         }
 1818 
 1819         /* Disable all interrupts. */
 1820         CSR_WRITE_4(sc, B0_IMSK, 0);
 1821         CSR_READ_4(sc, B0_IMSK);
 1822         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 1823         CSR_READ_4(sc, B0_HWE_IMSK);
 1824 
 1825         /* LED Off. */
 1826         CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
 1827 
 1828         /* Put the hardware into reset. */
 1829         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 1830 
 1831         mskc_status_dma_free(sc);
 1832 
 1833         if (sc->msk_irq != NULL) {
 1834                 bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid,
 1835                                      sc->msk_irq);
 1836         }
 1837         if (sc->msk_irq_type == PCI_INTR_TYPE_MSI)
 1838                 pci_release_msi(dev);
 1839 
 1840         if (sc->msk_res != NULL) {
 1841                 bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid,
 1842                                      sc->msk_res);
 1843         }
 1844 
 1845         if (sc->msk_sysctl_tree != NULL)
 1846                 sysctl_ctx_free(&sc->msk_sysctl_ctx);
 1847 
 1848         return (0);
 1849 }
 1850 
 1851 /* Create status DMA region. */
 1852 static int
 1853 mskc_status_dma_alloc(struct msk_softc *sc)
 1854 {
 1855         bus_dmamem_t dmem;
 1856         int error;
 1857 
 1858         error = bus_dmamem_coherent(NULL/* XXX parent */, MSK_STAT_ALIGN, 0,
 1859                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 1860                         MSK_STAT_RING_SZ, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
 1861         if (error) {
 1862                 device_printf(sc->msk_dev,
 1863                     "failed to create status coherent DMA memory\n");
 1864                 return error;
 1865         }
 1866         sc->msk_stat_tag = dmem.dmem_tag;
 1867         sc->msk_stat_map = dmem.dmem_map;
 1868         sc->msk_stat_ring = dmem.dmem_addr;
 1869         sc->msk_stat_ring_paddr = dmem.dmem_busaddr;
 1870 
 1871         return (0);
 1872 }
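      /*
       * Editorial note: MSK_STAT_RING_SZ is presumably
       * sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT, matching the
       * bzero() of the ring in mskc_reset(); the ring is allocated once here
       * and only cleared and re-based across resets.
       */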
 1873 
 1874 static void
 1875 mskc_status_dma_free(struct msk_softc *sc)
 1876 {
 1877         /* Destroy status block. */
 1878         if (sc->msk_stat_tag) {
 1879                 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
 1880                 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
 1881                                 sc->msk_stat_map);
 1882                 bus_dma_tag_destroy(sc->msk_stat_tag);
 1883                 sc->msk_stat_tag = NULL;
 1884         }
 1885 }
 1886 
 1887 static int
 1888 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
 1889 {
 1890         int error, i, j;
 1891 #ifdef MSK_JUMBO
 1892         struct msk_rxdesc *jrxd;
 1893         struct msk_jpool_entry *entry;
 1894         uint8_t *ptr;
              struct msk_dmamap_arg ctx;  /* bus addrs from the map loads below */
 1895 #endif
 1896         bus_size_t rxalign;
 1897 
 1898         /* Create parent DMA tag. */
 1899         /*
 1900          * XXX
 1901          * It seems that the Yukon II supports full 64-bit DMA operations,
 1902          * but it needs two descriptors (list elements) per 64-bit DMA
 1903          * operation. Since we don't know in advance whether a 32-bit or
 1904          * 64-bit DMA address mapping will be used for a given mbuf, we
 1905          * limit the DMA space to the 32-bit address range. Otherwise, we
 1906          * would have to check which DMA address is in use and chain another
 1907          * descriptor for the 64-bit DMA operation, which would also make the
 1908          * descriptor ring size variable. Limiting DMA addresses to the 32-bit
 1909          * address space greatly simplifies descriptor handling and may even
 1910          * improve performance a bit due to more efficient handling of
 1911          * descriptors. Apart from harassing the checksum offload mechanism,
 1912          * it seems a really bad idea to use a separate descriptor for 64-bit
 1913          * DMA operations just to save a little descriptor memory. Anyway, I've
 1914          * never seen this exotic scheme on other Ethernet interface hardware.
 1915          */
 1916         error = bus_dma_tag_create(
 1917                     NULL,                       /* parent */
 1918                     1, 0,                       /* alignment, boundary */
 1919                     BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 1920                     BUS_SPACE_MAXADDR,          /* highaddr */
 1921                     NULL, NULL,                 /* filter, filterarg */
 1922                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
 1923                     0,                          /* nsegments */
 1924                     BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
 1925                     0,                          /* flags */
 1926                     &sc_if->msk_cdata.msk_parent_tag);
 1927         if (error) {
 1928                 device_printf(sc_if->msk_if_dev,
 1929                               "failed to create parent DMA tag\n");
 1930                 return error;
 1931         }
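              /*
               * Editorial note: setting lowaddr to BUS_SPACE_MAXADDR_32BIT in
               * the parent tag is what enforces the single-descriptor scheme
               * described above; buffers that would land above 4GB are bounced
               * by the busdma layer instead of needing a second list element.
               */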
 1932 
 1933         /* Create DMA resources for the Tx ring. */
 1934         error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
 1935                                   &sc_if->msk_cdata.msk_tx_ring_tag,
 1936                                   (void *)&sc_if->msk_rdata.msk_tx_ring,
 1937                                   &sc_if->msk_rdata.msk_tx_ring_paddr,
 1938                                   &sc_if->msk_cdata.msk_tx_ring_map);
 1939         if (error) {
 1940                 device_printf(sc_if->msk_if_dev,
 1941                               "failed to create TX ring DMA resources\n");
 1942                 return error;
 1943         }
 1944 
 1945         /* Create DMA resources for the Rx ring. */
 1946         error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
 1947                                   &sc_if->msk_cdata.msk_rx_ring_tag,
 1948                                   (void *)&sc_if->msk_rdata.msk_rx_ring,
 1949                                   &sc_if->msk_rdata.msk_rx_ring_paddr,
 1950                                   &sc_if->msk_cdata.msk_rx_ring_map);
 1951         if (error) {
 1952                 device_printf(sc_if->msk_if_dev,
 1953                               "failed to create RX ring DMA resources\n");
 1954                 return error;
 1955         }
 1956 
 1957         /* Create tag for Tx buffers. */
 1958         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 1959                     1, 0,                       /* alignment, boundary */
 1960                     BUS_SPACE_MAXADDR,          /* lowaddr */
 1961                     BUS_SPACE_MAXADDR,          /* highaddr */
 1962                     NULL, NULL,                 /* filter, filterarg */
 1963                     MSK_JUMBO_FRAMELEN,         /* maxsize */
 1964                     MSK_MAXTXSEGS,              /* nsegments */
 1965                     MSK_MAXSGSIZE,              /* maxsegsize */
 1966                     BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
 1967                     BUS_DMA_ONEBPAGE,           /* flags */
 1968                     &sc_if->msk_cdata.msk_tx_tag);
 1969         if (error) {
 1970                 device_printf(sc_if->msk_if_dev,
 1971                               "failed to create Tx DMA tag\n");
 1972                 return error;
 1973         }
 1974 
 1975         /* Create DMA maps for Tx buffers. */
 1976         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 1977                 struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i];
 1978 
 1979                 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag,
 1980                                 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
 1981                                 &txd->tx_dmamap);
 1982                 if (error) {
 1983                         device_printf(sc_if->msk_if_dev,
 1984                                       "failed to create %dth Tx dmamap\n", i);
 1985 
 1986                         for (j = 0; j < i; ++j) {
 1987                                 txd = &sc_if->msk_cdata.msk_txdesc[j];
 1988                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
 1989                                                    txd->tx_dmamap);
 1990                         }
 1991                         bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
 1992                         sc_if->msk_cdata.msk_tx_tag = NULL;
 1993 
 1994                         return error;
 1995                 }
 1996         }
 1997 
 1998         /*
 1999          * Work around a hardware hang that seems to happen when the Rx
 2000          * buffer is not aligned on a multiple of the FIFO word size (8 bytes).
 2001          */
 2002         if (sc_if->msk_flags & MSK_FLAG_RAMBUF)
 2003                 rxalign = MSK_RX_BUF_ALIGN;
 2004         else
 2005                 rxalign = 1;
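              /*
               * Editorial note: MSK_RX_BUF_ALIGN is the FIFO word size
               * mentioned above (8 bytes); configurations without the RAM
               * buffer do not appear to exhibit the hang, so their Rx buffers
               * need no special alignment.
               */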
 2006 
 2007         /* Create tag for Rx buffers. */
 2008         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2009                     rxalign, 0,                 /* alignment, boundary */
 2010                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2011                     BUS_SPACE_MAXADDR,          /* highaddr */
 2012                     NULL, NULL,                 /* filter, filterarg */
 2013                     MCLBYTES,                   /* maxsize */
 2014                     1,                          /* nsegments */
 2015                     MCLBYTES,                   /* maxsegsize */
 2016                     BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED |
 2017                     BUS_DMA_WAITOK,             /* flags */
 2018                     &sc_if->msk_cdata.msk_rx_tag);
 2019         if (error) {
 2020                 device_printf(sc_if->msk_if_dev,
 2021                               "failed to create Rx DMA tag\n");
 2022                 return error;
 2023         }
 2024 
 2025         /* Create DMA maps for Rx buffers. */
 2026         error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, BUS_DMA_WAITOK,
 2027                                   &sc_if->msk_cdata.msk_rx_sparemap);
 2028         if (error) {
 2029                 device_printf(sc_if->msk_if_dev,
 2030                               "failed to create spare Rx dmamap\n");
 2031                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
 2032                 sc_if->msk_cdata.msk_rx_tag = NULL;
 2033                 return error;
 2034         }
 2035         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2036                 struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2037 
 2038                 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag,
 2039                                           BUS_DMA_WAITOK, &rxd->rx_dmamap);
 2040                 if (error) {
 2041                         device_printf(sc_if->msk_if_dev,
 2042                                       "failed to create %dth Rx dmamap\n", i);
 2043 
 2044                         for (j = 0; j < i; ++j) {
 2045                                 rxd = &sc_if->msk_cdata.msk_rxdesc[j];
 2046                                 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2047                                                    rxd->rx_dmamap);
 2048                         }
 2049                         bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2050                                            sc_if->msk_cdata.msk_rx_sparemap);
 2051                         bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
 2052                         sc_if->msk_cdata.msk_rx_tag = NULL;
 2053 
 2054                         return error;
 2055                 }
 2056         }
 2057 
 2058 #ifdef MSK_JUMBO
 2059         SLIST_INIT(&sc_if->msk_jfree_listhead);
 2060         SLIST_INIT(&sc_if->msk_jinuse_listhead);
 2061 
 2062         /* Create tag for jumbo Rx ring. */
 2063         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2064                     MSK_RING_ALIGN, 0,          /* alignment, boundary */
 2065                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2066                     BUS_SPACE_MAXADDR,          /* highaddr */
 2067                     NULL, NULL,                 /* filter, filterarg */
 2068                     MSK_JUMBO_RX_RING_SZ,       /* maxsize */
 2069                     1,                          /* nsegments */
 2070                     MSK_JUMBO_RX_RING_SZ,       /* maxsegsize */
 2071                     0,                          /* flags */
 2072                     NULL, NULL,                 /* lockfunc, lockarg */
 2073                     &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2074         if (error != 0) {
 2075                 device_printf(sc_if->msk_if_dev,
 2076                     "failed to create jumbo Rx ring DMA tag\n");
 2077                 goto fail;
 2078         }
 2079 
 2080         /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
 2081         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2082             (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
 2083             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
 2084             &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2085         if (error != 0) {
 2086                 device_printf(sc_if->msk_if_dev,
 2087                     "failed to allocate DMA'able memory for jumbo Rx ring\n");
 2088                 goto fail;
 2089         }
 2090 
 2091         ctx.msk_busaddr = 0;
 2092         error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2093             sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 2094             sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
 2095             msk_dmamap_cb, &ctx, 0);
 2096         if (error != 0) {
 2097                 device_printf(sc_if->msk_if_dev,
 2098                     "failed to load DMA'able memory for jumbo Rx ring\n");
 2099                 goto fail;
 2100         }
 2101         sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
 2102 
 2103         /* Create tag for jumbo buffer blocks. */
 2104         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2105                     PAGE_SIZE, 0,               /* alignment, boundary */
 2106                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2107                     BUS_SPACE_MAXADDR,          /* highaddr */
 2108                     NULL, NULL,                 /* filter, filterarg */
 2109                     MSK_JMEM,                   /* maxsize */
 2110                     1,                          /* nsegments */
 2111                     MSK_JMEM,                   /* maxsegsize */
 2112                     0,                          /* flags */
 2113                     NULL, NULL,                 /* lockfunc, lockarg */
 2114                     &sc_if->msk_cdata.msk_jumbo_tag);
 2115         if (error != 0) {
 2116                 device_printf(sc_if->msk_if_dev,
 2117                     "failed to create jumbo Rx buffer block DMA tag\n");
 2118                 goto fail;
 2119         }
 2120 
 2121         /* Create tag for jumbo Rx buffers. */
 2122         error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
 2123                     PAGE_SIZE, 0,               /* alignment, boundary */
 2124                     BUS_SPACE_MAXADDR,          /* lowaddr */
 2125                     BUS_SPACE_MAXADDR,          /* highaddr */
 2126                     NULL, NULL,                 /* filter, filterarg */
 2127                     MCLBYTES * MSK_MAXRXSEGS,   /* maxsize */
 2128                     MSK_MAXRXSEGS,              /* nsegments */
 2129                     MSK_JLEN,                   /* maxsegsize */
 2130                     0,                          /* flags */
 2131                     NULL, NULL,                 /* lockfunc, lockarg */
 2132                     &sc_if->msk_cdata.msk_jumbo_rx_tag);
 2133         if (error != 0) {
 2134                 device_printf(sc_if->msk_if_dev,
 2135                     "failed to create jumbo Rx DMA tag\n");
 2136                 goto fail;
 2137         }
 2138 
 2139         /* Create DMA maps for jumbo Rx buffers. */
 2140         if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2141             &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
 2142                 device_printf(sc_if->msk_if_dev,
 2143                     "failed to create spare jumbo Rx dmamap\n");
 2144                 goto fail;
 2145         }
 2146         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2147                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2148                 jrxd->rx_m = NULL;
 2149                 jrxd->rx_dmamap = NULL;
 2150                 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
 2151                     &jrxd->rx_dmamap);
 2152                 if (error != 0) {
 2153                         device_printf(sc_if->msk_if_dev,
 2154                             "failed to create jumbo Rx dmamap\n");
 2155                         goto fail;
 2156                 }
 2157         }
 2158 
 2159         /* Allocate DMA'able memory and load the DMA map for jumbo buf. */
 2160         error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
 2161             (void **)&sc_if->msk_rdata.msk_jumbo_buf,
 2162             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
 2163             &sc_if->msk_cdata.msk_jumbo_map);
 2164         if (error != 0) {
 2165                 device_printf(sc_if->msk_if_dev,
 2166                     "failed to allocate DMA'able memory for jumbo buf\n");
 2167                 goto fail;
 2168         }
 2169 
 2170         ctx.msk_busaddr = 0;
 2171         error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
 2172             sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
 2173             MSK_JMEM, msk_dmamap_cb, &ctx, 0);
 2174         if (error != 0) {
 2175                 device_printf(sc_if->msk_if_dev,
 2176                     "failed to load DMA'able memory for jumbobuf\n");
 2177                 goto fail;
 2178         }
 2179         sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
 2180 
 2181         /*
 2182          * Now divide it up into 9K pieces and save the addresses
 2183          * in an array.
 2184          */
 2185         ptr = sc_if->msk_rdata.msk_jumbo_buf;
 2186         for (i = 0; i < MSK_JSLOTS; i++) {
 2187                 sc_if->msk_cdata.msk_jslots[i] = ptr;
 2188                 ptr += MSK_JLEN;
 2189                 entry = malloc(sizeof(struct msk_jpool_entry),
 2190                     M_DEVBUF, M_WAITOK);
 2191                 if (entry == NULL) {
 2192                         device_printf(sc_if->msk_if_dev,
 2193                             "no memory for jumbo buffers!\n");
 2194                         error = ENOMEM;
 2195                         goto fail;
 2196                 }
 2197                 entry->slot = i;
 2198                 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
 2199                     jpool_entries);
 2200         }
 2201 #endif
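              /*
               * Editorial note: the MSK_JUMBO block above is compiled out and
               * still uses FreeBSD-style interfaces (busdma lockfunc
               * arguments, malloc()/free()); it would need the same
               * kmalloc()/kfree() and bus_dmamem_coherent() treatment as the
               * rest of the file before it could be enabled.
               */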
 2202         return 0;
 2203 }
 2204 
 2205 static void
 2206 msk_txrx_dma_free(struct msk_if_softc *sc_if)
 2207 {
 2208         struct msk_txdesc *txd;
 2209         struct msk_rxdesc *rxd;
 2210 #ifdef MSK_JUMBO
 2211         struct msk_rxdesc *jrxd;
 2212         struct msk_jpool_entry *entry;
 2213 #endif
 2214         int i;
 2215 
 2216 #ifdef MSK_JUMBO
 2217         MSK_JLIST_LOCK(sc_if);
 2218         while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
 2219                 device_printf(sc_if->msk_if_dev,
 2220                     "asked to free buffer that is in use!\n");
 2221                 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
 2222                 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
 2223                     jpool_entries);
 2224         }
 2225 
 2226         while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
 2227                 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
 2228                 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
 2229                 free(entry, M_DEVBUF);
 2230         }
 2231         MSK_JLIST_UNLOCK(sc_if);
 2232 
 2233         /* Destroy jumbo buffer block. */
 2234         if (sc_if->msk_cdata.msk_jumbo_map)
 2235                 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
 2236                     sc_if->msk_cdata.msk_jumbo_map);
 2237 
 2238         if (sc_if->msk_rdata.msk_jumbo_buf) {
 2239                 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
 2240                     sc_if->msk_rdata.msk_jumbo_buf,
 2241                     sc_if->msk_cdata.msk_jumbo_map);
 2242                 sc_if->msk_rdata.msk_jumbo_buf = NULL;
 2243                 sc_if->msk_cdata.msk_jumbo_map = NULL;
 2244         }
 2245 
 2246         /* Jumbo Rx ring. */
 2247         if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
 2248                 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
 2249                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2250                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2251                 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
 2252                     sc_if->msk_rdata.msk_jumbo_rx_ring)
 2253                         bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 2254                             sc_if->msk_rdata.msk_jumbo_rx_ring,
 2255                             sc_if->msk_cdata.msk_jumbo_rx_ring_map);
 2256                 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
 2257                 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
 2258                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
 2259                 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
 2260         }
 2261 
 2262         /* Jumbo Rx buffers. */
 2263         if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
 2264                 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 2265                         jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 2266                         if (jrxd->rx_dmamap) {
 2267                                 bus_dmamap_destroy(
 2268                                     sc_if->msk_cdata.msk_jumbo_rx_tag,
 2269                                     jrxd->rx_dmamap);
 2270                                 jrxd->rx_dmamap = NULL;
 2271                         }
 2272                 }
 2273                 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
 2274                         bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
 2275                             sc_if->msk_cdata.msk_jumbo_rx_sparemap);
 2276                         sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
 2277                 }
 2278                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
 2279                 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
 2280         }
 2281 #endif
 2282 
 2283         /* Tx ring. */
 2284         msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
 2285                            sc_if->msk_rdata.msk_tx_ring,
 2286                            sc_if->msk_cdata.msk_tx_ring_map);
 2287 
 2288         /* Rx ring. */
 2289         msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag,
 2290                            sc_if->msk_rdata.msk_rx_ring,
 2291                            sc_if->msk_cdata.msk_rx_ring_map);
 2292 
 2293         /* Tx buffers. */
 2294         if (sc_if->msk_cdata.msk_tx_tag) {
 2295                 for (i = 0; i < MSK_TX_RING_CNT; i++) {
 2296                         txd = &sc_if->msk_cdata.msk_txdesc[i];
 2297                         bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
 2298                                            txd->tx_dmamap);
 2299                 }
 2300                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
 2301                 sc_if->msk_cdata.msk_tx_tag = NULL;
 2302         }
 2303 
 2304         /* Rx buffers. */
 2305         if (sc_if->msk_cdata.msk_rx_tag) {
 2306                 for (i = 0; i < MSK_RX_RING_CNT; i++) {
 2307                         rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 2308                         bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2309                                            rxd->rx_dmamap);
 2310                 }
 2311                 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
 2312                                    sc_if->msk_cdata.msk_rx_sparemap);
 2313                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
 2314                 sc_if->msk_cdata.msk_rx_tag = NULL;
 2315         }
 2316 
 2317         if (sc_if->msk_cdata.msk_parent_tag) {
 2318                 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
 2319                 sc_if->msk_cdata.msk_parent_tag = NULL;
 2320         }
 2321 }
 2322 
 2323 #ifdef MSK_JUMBO
 2324 /*
 2325  * Allocate a jumbo buffer.
 2326  */
 2327 static void *
 2328 msk_jalloc(struct msk_if_softc *sc_if)
 2329 {
 2330         struct msk_jpool_entry *entry;
 2331 
 2332         MSK_JLIST_LOCK(sc_if);
 2333 
 2334         entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
 2335 
 2336         if (entry == NULL) {
 2337                 MSK_JLIST_UNLOCK(sc_if);
 2338                 return (NULL);
 2339         }
 2340 
 2341         SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
 2342         SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
 2343 
 2344         MSK_JLIST_UNLOCK(sc_if);
 2345 
 2346         return (sc_if->msk_cdata.msk_jslots[entry->slot]);
 2347 }
 2348 
 2349 /*
 2350  * Release a jumbo buffer.
 2351  */
 2352 static void
 2353 msk_jfree(void *buf, void *args)
 2354 {
 2355         struct msk_if_softc *sc_if;
 2356         struct msk_jpool_entry *entry;
 2357         int i;
 2358 
 2359         /* Extract the softc struct pointer. */
 2360         sc_if = (struct msk_if_softc *)args;
 2361         KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
 2362 
 2363         MSK_JLIST_LOCK(sc_if);
 2364         /* Calculate the slot this buffer belongs to. */
 2365         i = ((vm_offset_t)buf
 2366              - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
 2367         KASSERT(i >= 0 && i < MSK_JSLOTS,
 2368             ("%s: asked to free buffer that we don't manage!", __func__));
 2369 
 2370         entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
 2371         KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
 2372         entry->slot = i;
 2373         SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
 2374         SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
 2375         if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
 2376                 wakeup(sc_if);
 2377 
 2378         MSK_JLIST_UNLOCK(sc_if);
 2379 }
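      /*
       * Rough usage sketch (editorial): a driver typically wires one of
       * these pool buffers to an mbuf as external storage with msk_jfree()
       * as the free routine, e.g. (the exact MEXTADD() signature varies
       * between BSD releases):
       *
       *	MGETHDR(m, M_NOWAIT, MT_DATA);
       *	buf = msk_jalloc(sc_if);
       *	MEXTADD(m, buf, MSK_JLEN, msk_jfree, buf, sc_if, 0, EXT_NET_DRV);
       */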
 2380 #endif
 2381 
 2382 static int
 2383 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
 2384 {
 2385         struct msk_txdesc *txd, *txd_last;
 2386         struct msk_tx_desc *tx_le;
 2387         struct mbuf *m;
 2388         bus_dmamap_t map;
 2389         bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
 2390         uint32_t control, prod, si;
 2391         uint16_t offset, tcp_offset;
 2392         int error, i, nsegs, maxsegs, defrag;
 2393 
 2394         maxsegs = MSK_TX_RING_CNT - sc_if->msk_cdata.msk_tx_cnt -
 2395                   MSK_RESERVED_TX_DESC_CNT;
 2396         KASSERT(maxsegs >= MSK_SPARE_TX_DESC_CNT,
 2397                 ("not enough spare TX desc"));
 2398         if (maxsegs > MSK_MAXTXSEGS)
 2399                 maxsegs = MSK_MAXTXSEGS;
 2400 
 2401         /*
 2402          * Align the TX buffer on a 64-byte boundary.  This greatly improves
 2403          * bulk data TX performance on my 88E8053 (by 100Mbps or more).
 2404          * Try to avoid m_defrag() if the mbuf is not chained together
 2405          * by m_next (i.e. m->m_len == m->m_pkthdr.len).
 2406          */
 2407 
 2408 #define MSK_TXBUF_ALIGN 64
 2409 #define MSK_TXBUF_MASK  (MSK_TXBUF_ALIGN - 1)
 2410 
 2411         defrag = 1;
 2412         m = *m_head;
 2413         if (m->m_len == m->m_pkthdr.len) {
 2414                 int space;
 2415 
 2416                 space = ((uintptr_t)m->m_data & MSK_TXBUF_MASK);
 2417                 if (space) {
 2418                         if (M_WRITABLE(m)) {
 2419                                 int fwd = MSK_TXBUF_ALIGN - space;
 2420                                 if (M_TRAILINGSPACE(m) >= fwd) {
 2421                                         /* e.g. TCP ACKs */
 2422                                         bcopy(m->m_data, m->m_data + fwd,
 2423                                               m->m_len);
 2424                                         m->m_data += fwd;
 2425                                         defrag = 0;
 2426                                         sc_if->msk_softc->msk_trailing_copied++;
 2427                                 } else {
 2428                                         if (M_LEADINGSPACE(m) >= space) {
 2429                                                 /* e.g. Small UDP datagrams */
 2430                                                 bcopy(m->m_data,
 2431                                                       m->m_data - space,
 2432                                                       m->m_len);
 2433                                                 m->m_data -= space;
 2434                                                 defrag = 0;
 2435                                                 sc_if->msk_softc->
 2436                                                     msk_leading_copied++;
 2437                                         }
 2438                                 }
 2439                         }
 2440                 } else {
 2441                         /* e.g. on forwarding path */
 2442                         defrag = 0;
 2443                 }
 2444         }
 2445         if (defrag) {
 2446                 m = m_defrag(*m_head, MB_DONTWAIT);
 2447                 if (m == NULL) {
 2448                         m_freem(*m_head);
 2449                         *m_head = NULL;
 2450                         return ENOBUFS;
 2451                 }
 2452                 *m_head = m;
 2453         } else {
 2454                 sc_if->msk_softc->msk_defrag_avoided++;
 2455         }
 2456 
 2457 #undef MSK_TXBUF_MASK
 2458 #undef MSK_TXBUF_ALIGN
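              /*
               * Worked example (editorial): if m->m_data ends in 0x28, space =
               * 0x28 & 0x3f = 40, i.e. the payload sits 40 bytes past a
               * 64-byte boundary.  Moving it forward to the next boundary
               * needs 64 - 40 = 24 bytes of trailing room; moving it backward
               * needs 40 bytes of leading room.  Only when neither copy is
               * possible (or the mbuf is chained or read-only) do we fall
               * back to m_defrag().
               */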
 2459 
 2460         tcp_offset = offset = 0;
 2461         if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
 2462                 /*
 2463                  * Since the mbuf carries no protocol-specific structure
 2464                  * information, we have to inspect the protocol headers here
 2465                  * to set up TSO and checksum offload. I don't know why
 2466                  * Marvell made such a decision in the chip design, because
 2467                  * other GigE hardware normally takes care of all these
 2468                  * chores itself. However, TSO performance of the Yukon II
 2469                  * is good enough to be worth implementing.
 2470                  */
 2471                 struct ether_header *eh;
 2472                 struct ip *ip;
 2473 
 2474                 /* TODO check for M_WRITABLE(m) */
 2475 
 2476                 offset = sizeof(struct ether_header);
 2477                 m = m_pullup(m, offset);
 2478                 if (m == NULL) {
 2479                         *m_head = NULL;
 2480                         return (ENOBUFS);
 2481                 }
 2482                 eh = mtod(m, struct ether_header *);
 2483                 /* Check if hardware VLAN insertion is off. */
 2484                 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
 2485                         offset = sizeof(struct ether_vlan_header);
 2486                         m = m_pullup(m, offset);
 2487                         if (m == NULL) {
 2488                                 *m_head = NULL;
 2489                                 return (ENOBUFS);
 2490                         }
 2491                 }
 2492                 m = m_pullup(m, offset + sizeof(struct ip));
 2493                 if (m == NULL) {
 2494                         *m_head = NULL;
 2495                         return (ENOBUFS);
 2496                 }
 2497                 ip = (struct ip *)(mtod(m, char *) + offset);
 2498                 offset += (ip->ip_hl << 2);
 2499                 tcp_offset = offset;
 2500                 /*
 2501                  * It seems that Yukon II has a Tx checksum offload bug
 2502                  * for small TCP packets less than 60 bytes in size
 2503                  * (e.g. TCP window probe packets, pure ACK packets).
 2504                  * Common workarounds, such as zero-padding the frame to
 2505                  * the minimum Ethernet frame size, didn't work at all.
 2506                  * Instead of disabling checksum offload completely, we
 2507                  * resort to a software checksum routine when we
 2508                  * encounter short TCP frames.
 2509                  * Short UDP packets appear to be handled correctly by
 2510                  * Yukon II.
 2511                  */
 2512                 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
 2513                     (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
 2514                         uint16_t csum;
 2515 
 2516                         csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
 2517                             (ip->ip_hl << 2), offset);
 2518                         *(uint16_t *)(m->m_data + offset +
 2519                             m->m_pkthdr.csum_data) = csum;
 2520                         m->m_pkthdr.csum_flags &= ~CSUM_TCP;
 2521                 }
 2522                 *m_head = m;
 2523         }
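
        /*
         * A minimal sketch of the software checksum fallback above: a
         * plain ones'-complement Internet checksum over bytes skip
         * through len - 1, folded to 16 bits.  This only illustrates
         * what in_cksum_skip() computes; cksum_skip() here is a
         * hypothetical flat-buffer helper, not kernel API:
         *
         *      static uint16_t
         *      cksum_skip(const uint8_t *buf, size_t len, size_t skip)
         *      {
         *              uint32_t sum = 0;
         *              size_t i;
         *
         *              for (i = skip; i + 1 < len; i += 2)
         *                      sum += (uint32_t)buf[i] << 8 | buf[i + 1];
         *              if (i < len)
         *                      sum += (uint32_t)buf[i] << 8;
         *              while (sum >> 16)
         *                      sum = (sum & 0xffff) + (sum >> 16);
         *              return (~sum & 0xffff);
         *      }
         */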
 2524 
 2525         prod = sc_if->msk_cdata.msk_tx_prod;
 2526         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2527         txd_last = txd;
 2528         map = txd->tx_dmamap;
 2529 
 2530         error = bus_dmamap_load_mbuf_defrag(sc_if->msk_cdata.msk_tx_tag, map,
 2531                         m_head, txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
 2532         if (error) {
 2533                 m_freem(*m_head);
 2534                 *m_head = NULL;
 2535                 return error;
 2536         }
 2537         bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
 2538 
 2539         m = *m_head;
 2540         control = 0;
 2541         tx_le = NULL;
 2542 
 2543 #ifdef notyet
 2544         /* Check if we have a VLAN tag to insert. */
 2545         if ((m->m_flags & M_VLANTAG) != 0) {
 2546                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2547                 tx_le->msk_addr = htole32(0);
 2548                 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
 2549                     htons(m->m_pkthdr.ether_vtag));
 2550                 sc_if->msk_cdata.msk_tx_cnt++;
 2551                 MSK_INC(prod, MSK_TX_RING_CNT);
 2552                 control |= INS_VLAN;
 2553         }
 2554 #endif
 2555         /* Check if we have to handle checksum offload. */
 2556         if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
 2557                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2558                 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
 2559                     & 0xffff) | ((uint32_t)tcp_offset << 16));
 2560                 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
 2561                 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
 2562                 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
 2563                         control |= UDPTCP;
 2564                 sc_if->msk_cdata.msk_tx_cnt++;
 2565                 MSK_INC(prod, MSK_TX_RING_CNT);
 2566         }
 2567 
 2568         si = prod;
 2569         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2570         tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
 2571         tx_le->msk_control = htole32(txsegs[0].ds_len | control |
 2572             OP_PACKET);
 2573         sc_if->msk_cdata.msk_tx_cnt++;
 2574         MSK_INC(prod, MSK_TX_RING_CNT);
 2575 
 2576         for (i = 1; i < nsegs; i++) {
 2577                 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2578                 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
 2579                 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
 2580                     OP_BUFFER | HW_OWNER);
 2581                 sc_if->msk_cdata.msk_tx_cnt++;
 2582                 MSK_INC(prod, MSK_TX_RING_CNT);
 2583         }
 2584         /* Update producer index. */
 2585         sc_if->msk_cdata.msk_tx_prod = prod;
 2586 
 2587         /* Set EOP on the last descriptor. */
 2588         prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
 2589         tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
 2590         tx_le->msk_control |= htole32(EOP);
 2591 
 2592         /* Turn the first descriptor ownership to hardware. */
 2593         tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
 2594         tx_le->msk_control |= htole32(HW_OWNER);
 2595 
 2596         txd = &sc_if->msk_cdata.msk_txdesc[prod];
 2597         map = txd_last->tx_dmamap;
 2598         txd_last->tx_dmamap = txd->tx_dmamap;
 2599         txd->tx_dmamap = map;
 2600         txd->tx_m = m;
 2601 
 2602         return (0);
 2603 }
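
/*
 * A self-contained sketch of the descriptor ownership handoff performed
 * by msk_encap() above: fill every list element of a frame, mark the
 * last one EOP, and only then set HW_OWNER on the first element so the
 * chip never walks a half-built chain.  The toy_le layout, ring size
 * and toy_encap() are illustrative assumptions, not driver structures.
 */
#if 0   /* illustration only; not compiled into the driver */
#include <stdint.h>

#define TOY_RING_CNT    8
#define TOY_OP_PACKET   0x01000000u
#define TOY_OP_BUFFER   0x02000000u
#define TOY_EOP         0x40000000u
#define TOY_HW_OWNER    0x80000000u
#define TOY_INC(x)      ((x) = ((x) + 1) % TOY_RING_CNT)

struct toy_le {
        uint32_t        addr;
        uint32_t        control;
};

static void
toy_encap(struct toy_le *ring, int *prod, const uint32_t *segs, int nsegs)
{
        int first = *prod, last, i;

        /* First LE of the frame: withhold HW_OWNER for now. */
        ring[*prod].addr = segs[0];
        ring[*prod].control = TOY_OP_PACKET;
        TOY_INC(*prod);
        for (i = 1; i < nsegs; i++) {
                ring[*prod].addr = segs[i];
                ring[*prod].control = TOY_OP_BUFFER | TOY_HW_OWNER;
                TOY_INC(*prod);
        }
        /* Mark the last LE as end-of-packet. */
        last = (*prod + TOY_RING_CNT - 1) % TOY_RING_CNT;
        ring[last].control |= TOY_EOP;
        /* Hand the entire chain to hardware in one final step. */
        ring[first].control |= TOY_HW_OWNER;
}
#endif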
 2604 
 2605 static void
 2606 msk_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
 2607 {
 2608         struct msk_if_softc *sc_if;
 2609         struct mbuf *m_head;
 2610         int enq;
 2611 
 2612         sc_if = ifp->if_softc;
 2613 
 2614         ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
 2615         ASSERT_SERIALIZED(ifp->if_serializer);
 2616 
 2617         if (!sc_if->msk_link) {
 2618                 ifq_purge(&ifp->if_snd);
 2619                 return;
 2620         }
 2621 
 2622         if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
 2623                 return;
 2624 
 2625         enq = 0;
 2626         while (!ifq_is_empty(&ifp->if_snd)) {
 2627                 if (MSK_IS_OACTIVE(sc_if)) {
 2628                         ifq_set_oactive(&ifp->if_snd);
 2629                         break;
 2630                 }
 2631 
 2632                 m_head = ifq_dequeue(&ifp->if_snd);
 2633                 if (m_head == NULL)
 2634                         break;
 2635 
 2636                 /*
 2637                  * Pack the data into the transmit ring. If we
 2638                  * don't have room, set the OACTIVE flag and wait
 2639                  * for the NIC to drain the ring.
 2640                  */
 2641                 if (msk_encap(sc_if, &m_head) != 0) {
 2642                         IFNET_STAT_INC(ifp, oerrors, 1);
 2643                         if (sc_if->msk_cdata.msk_tx_cnt == 0) {
 2644                                 continue;
 2645                         } else {
 2646                                 ifq_set_oactive(&ifp->if_snd);
 2647                                 break;
 2648                         }
 2649                 }
 2650                 enq = 1;
 2651 
 2652                 /*
 2653                  * If there's a BPF listener, bounce a copy of this frame
 2654                  * to it.
 2655                  */
 2656                 BPF_MTAP(ifp, m_head);
 2657         }
 2658 
 2659         if (enq) {
 2660                 /* Transmit */
 2661                 CSR_WRITE_2(sc_if->msk_softc,
 2662                     Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
 2663                     sc_if->msk_cdata.msk_tx_prod);
 2664 
 2665                 /* Set a timeout in case the chip goes out to lunch. */
 2666                 ifp->if_timer = MSK_TX_TIMEOUT;
 2667         }
 2668 }
 2669 
 2670 static void
 2671 msk_watchdog(struct ifnet *ifp)
 2672 {
 2673         struct msk_if_softc *sc_if = ifp->if_softc;
 2674         uint32_t ridx;
 2675         int idx;
 2676 
 2677         ASSERT_SERIALIZED(ifp->if_serializer);
 2678 
 2679         if (sc_if->msk_link == 0) {
 2680                 if (bootverbose)
 2681                         if_printf(sc_if->msk_ifp, "watchdog timeout "
 2682                            "(missed link)\n");
 2683                 IFNET_STAT_INC(ifp, oerrors, 1);
 2684                 msk_init(sc_if);
 2685                 return;
 2686         }
 2687 
 2688         /*
 2689          * Reclaim first as there is a possibility of losing Tx completion
 2690          * interrupts.
 2691          */
 2692         ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
 2693         idx = CSR_READ_2(sc_if->msk_softc, ridx);
 2694         if (sc_if->msk_cdata.msk_tx_cons != idx) {
 2695                 msk_txeof(sc_if, idx);
 2696                 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
 2697                         if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
 2698                             "-- recovering\n");
 2699                         if (!ifq_is_empty(&ifp->if_snd))
 2700                                 if_devstart(ifp);
 2701                         return;
 2702                 }
 2703         }
 2704 
 2705         if_printf(ifp, "watchdog timeout\n");
 2706         IFNET_STAT_INC(ifp, oerrors, 1);
 2707         msk_init(sc_if);
 2708         if (!ifq_is_empty(&ifp->if_snd))
 2709                 if_devstart(ifp);
 2710 }
 2711 
 2712 static int
 2713 mskc_shutdown(device_t dev)
 2714 {
 2715         struct msk_softc *sc = device_get_softc(dev);
 2716         int i;
 2717 
 2718         lwkt_serialize_enter(&sc->msk_serializer);
 2719 
 2720         for (i = 0; i < sc->msk_num_port; i++) {
 2721                 if (sc->msk_if[i] != NULL)
 2722                         msk_stop(sc->msk_if[i]);
 2723         }
 2724 
 2725         /* Put the hardware into reset. */
 2726         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 2727 
 2728         lwkt_serialize_exit(&sc->msk_serializer);
 2729         return (0);
 2730 }
 2731 
 2732 static int
 2733 mskc_suspend(device_t dev)
 2734 {
 2735         struct msk_softc *sc = device_get_softc(dev);
 2736         int i;
 2737 
 2738         lwkt_serialize_enter(&sc->msk_serializer);
 2739 
 2740         for (i = 0; i < sc->msk_num_port; i++) {
 2741                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 2742                     ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0))
 2743                         msk_stop(sc->msk_if[i]);
 2744         }
 2745 
 2746         /* Disable all interrupts. */
 2747         CSR_WRITE_4(sc, B0_IMSK, 0);
 2748         CSR_READ_4(sc, B0_IMSK);
 2749         CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
 2750         CSR_READ_4(sc, B0_HWE_IMSK);
 2751 
 2752         mskc_phy_power(sc, MSK_PHY_POWERDOWN);
 2753 
 2754         /* Put the hardware into reset. */
 2755         CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
 2756         sc->msk_suspended = 1;
 2757 
 2758         lwkt_serialize_exit(&sc->msk_serializer);
 2759 
 2760         return (0);
 2761 }
 2762 
 2763 static int
 2764 mskc_resume(device_t dev)
 2765 {
 2766         struct msk_softc *sc = device_get_softc(dev);
 2767         int i;
 2768 
 2769         lwkt_serialize_enter(&sc->msk_serializer);
 2770 
 2771         /* Enable all clocks before accessing any registers. */
 2772         CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
 2773         mskc_reset(sc);
 2774         for (i = 0; i < sc->msk_num_port; i++) {
 2775                 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
 2776                     ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
 2777                         msk_init(sc->msk_if[i]);
 2778         }
 2779         sc->msk_suspended = 0;
 2780 
 2781         lwkt_serialize_exit(&sc->msk_serializer);
 2782 
 2783         return (0);
 2784 }
 2785 
 2786 static void
 2787 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
 2788 {
 2789         struct mbuf *m;
 2790         struct ifnet *ifp;
 2791         struct msk_rxdesc *rxd;
 2792         int cons, rxlen;
 2793 
 2794         ifp = sc_if->msk_ifp;
 2795 
 2796         cons = sc_if->msk_cdata.msk_rx_cons;
 2797         do {
 2798                 rxlen = status >> 16;
 2799                 if ((status & GMR_FS_VLAN) != 0 &&
 2800                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 2801                         rxlen -= EVL_ENCAPLEN;
 2802                 if (sc_if->msk_flags & MSK_FLAG_NORXCHK) {
 2803                         /*
 2804                          * For controllers that return a bogus status
 2805                          * code, just do a minimal check and let the
 2806                          * upper stack handle this frame.
 2807                          */
 2808                         if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
 2809                                 IFNET_STAT_INC(ifp, ierrors, 1);
 2810                                 msk_discard_rxbuf(sc_if, cons);
 2811                                 break;
 2812                         }
 2813                 } else if (len > sc_if->msk_framesize ||
 2814                     ((status & GMR_FS_ANY_ERR) != 0) ||
 2815                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 2816                         /* Don't count flow-control packet as errors. */
 2817                         if ((status & GMR_FS_GOOD_FC) == 0)
 2818                                 IFNET_STAT_INC(ifp, ierrors, 1);
 2819                         msk_discard_rxbuf(sc_if, cons);
 2820                         break;
 2821                 }
 2822                 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
 2823                 m = rxd->rx_m;
 2824                 if (msk_newbuf(sc_if, cons, 0) != 0) {
 2825                         IFNET_STAT_INC(ifp, iqdrops, 1);
 2826                         /* Reuse old buffer. */
 2827                         msk_discard_rxbuf(sc_if, cons);
 2828                         break;
 2829                 }
 2830                 m->m_pkthdr.rcvif = ifp;
 2831                 m->m_pkthdr.len = m->m_len = len;
 2832                 IFNET_STAT_INC(ifp, ipackets, 1);
 2833 #ifdef notyet
 2834                 /* Check for VLAN tagged packets. */
 2835                 if ((status & GMR_FS_VLAN) != 0 &&
 2836                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 2837                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 2838                         m->m_flags |= M_VLANTAG;
 2839                 }
 2840 #endif
 2841 
 2842                 ifp->if_input(ifp, m);
 2843         } while (0);
 2844 
 2845         MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
 2846         MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
 2847 }
 2848 
 2849 #ifdef MSK_JUMBO
 2850 static void
 2851 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
 2852 {
 2853         struct mbuf *m;
 2854         struct ifnet *ifp;
 2855         struct msk_rxdesc *jrxd;
 2856         int cons, rxlen;
 2857 
 2858         ifp = sc_if->msk_ifp;
 2859 
 2860         MSK_IF_LOCK_ASSERT(sc_if);
 2861 
 2862         cons = sc_if->msk_cdata.msk_rx_cons;
 2863         do {
 2864                 rxlen = status >> 16;
 2865                 if ((status & GMR_FS_VLAN) != 0 &&
 2866                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
 2867                         rxlen -= ETHER_VLAN_ENCAP_LEN;
 2868                 if (len > sc_if->msk_framesize ||
 2869                     ((status & GMR_FS_ANY_ERR) != 0) ||
 2870                     ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 2871                         /* Don't count flow-control packet as errors. */
 2872                         if ((status & GMR_FS_GOOD_FC) == 0)
 2873                                 ifp->if_ierrors++;
 2874                         msk_discard_jumbo_rxbuf(sc_if, cons);
 2875                         break;
 2876                 }
 2877                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
 2878                 m = jrxd->rx_m;
 2879                 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
 2880                         ifp->if_iqdrops++;
 2881                         /* Reuse old buffer. */
 2882                         msk_discard_jumbo_rxbuf(sc_if, cons);
 2883                         break;
 2884                 }
 2885                 m->m_pkthdr.rcvif = ifp;
 2886                 m->m_pkthdr.len = m->m_len = len;
 2887                 ifp->if_ipackets++;
 2888                 /* Check for VLAN tagged packets. */
 2889                 if ((status & GMR_FS_VLAN) != 0 &&
 2890                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
 2891                         m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
 2892                         m->m_flags |= M_VLANTAG;
 2893                 }
 2894                 MSK_IF_UNLOCK(sc_if);
 2895                 (*ifp->if_input)(ifp, m);
 2896                 MSK_IF_LOCK(sc_if);
 2897         } while (0);
 2898 
 2899         MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
 2900         MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
 2901 }
 2902 #endif
 2903 
 2904 static void
 2905 msk_txeof(struct msk_if_softc *sc_if, int idx)
 2906 {
 2907         struct msk_txdesc *txd;
 2908         struct msk_tx_desc *cur_tx;
 2909         struct ifnet *ifp;
 2910         uint32_t control;
 2911         int cons, prog;
 2912 
 2913         ifp = sc_if->msk_ifp;
 2914 
 2915         /*
 2916          * Go through our tx ring and free mbufs for those
 2917          * frames that have been sent.
 2918          */
 2919         cons = sc_if->msk_cdata.msk_tx_cons;
 2920         prog = 0;
 2921         for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
 2922                 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
 2923                         break;
 2924                 prog++;
 2925                 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
 2926                 control = le32toh(cur_tx->msk_control);
 2927                 sc_if->msk_cdata.msk_tx_cnt--;
 2928                 if ((control & EOP) == 0)
 2929                         continue;
 2930                 txd = &sc_if->msk_cdata.msk_txdesc[cons];
 2931                 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
 2932 
 2933                 IFNET_STAT_INC(ifp, opackets, 1);
 2934                 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
 2935                     __func__));
 2936                 m_freem(txd->tx_m);
 2937                 txd->tx_m = NULL;
 2938         }
 2939 
 2940         if (prog > 0) {
 2941                 sc_if->msk_cdata.msk_tx_cons = cons;
 2942                 if (!MSK_IS_OACTIVE(sc_if))
 2943                         ifq_clr_oactive(&ifp->if_snd);
 2944                 if (sc_if->msk_cdata.msk_tx_cnt == 0)
 2945                         ifp->if_timer = 0;
 2946                 /* No need to sync LEs since we didn't update them. */
 2947         }
 2948 }
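
/*
 * The matching consumer side, as a sketch: walk the ring from the saved
 * consumer index to the index reported by the status unit, releasing a
 * frame's resources once its EOP element is reached.  This reuses the
 * toy_le layout from the encap sketch above; toy_txeof() is
 * illustrative only.
 */
#if 0   /* illustration only; not compiled into the driver */
static int
toy_txeof(struct toy_le *ring, int cons, int idx, int *inflight)
{
        for (; cons != idx; TOY_INC(cons)) {
                if (*inflight <= 0)
                        break;
                (*inflight)--;
                if (ring[cons].control & TOY_EOP) {
                        /* Frame complete: unmap DMA and free the mbuf here. */
                        ring[cons].control = 0;
                }
        }
        return (cons);  /* the new consumer index */
}
#endif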
 2949 
 2950 static void
 2951 msk_tick(void *xsc_if)
 2952 {
 2953         struct msk_if_softc *sc_if = xsc_if;
 2954         struct ifnet *ifp = &sc_if->arpcom.ac_if;
 2955         struct mii_data *mii;
 2956 
 2957         lwkt_serialize_enter(ifp->if_serializer);
 2958 
 2959         mii = device_get_softc(sc_if->msk_miibus);
 2960 
 2961         mii_tick(mii);
 2962         if (!sc_if->msk_link)
 2963                 msk_miibus_statchg(sc_if->msk_if_dev);
 2964         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 2965 
 2966         lwkt_serialize_exit(ifp->if_serializer);
 2967 }
 2968 
 2969 static void
 2970 msk_intr_phy(struct msk_if_softc *sc_if)
 2971 {
 2972         uint16_t status;
 2973 
 2974         msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 2975         status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
 2976         /* Handle FIFO Underrun/Overflow? */
 2977         if (status & PHY_M_IS_FIFO_ERROR) {
 2978                 device_printf(sc_if->msk_if_dev,
 2979                     "PHY FIFO underrun/overflow.\n");
 2980         }
 2981 }
 2982 
 2983 static void
 2984 msk_intr_gmac(struct msk_if_softc *sc_if)
 2985 {
 2986         struct msk_softc *sc;
 2987         uint8_t status;
 2988 
 2989         sc = sc_if->msk_softc;
 2990         status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 2991 
 2992         /* GMAC Rx FIFO overrun. */
 2993         if ((status & GM_IS_RX_FF_OR) != 0) {
 2994                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 2995                     GMF_CLI_RX_FO);
 2996         }
 2997         /* GMAC Tx FIFO underrun. */
 2998         if ((status & GM_IS_TX_FF_UR) != 0) {
 2999                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3000                     GMF_CLI_TX_FU);
 3001                 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
 3002                 /*
 3003                  * XXX
 3004                  * In case of Tx underrun, we may need to flush/reset the
 3005                  * Tx MAC, but that would also require resynchronization
 3006                  * with the status LEs. Reinitializing status LEs would
 3007                  * affect the other port in a dual-MAC configuration, so
 3008                  * it should be avoided as much as possible.
 3009                  * Due to lack of documentation this is all vague
 3010                  * guesswork and needs more investigation.
 3011                  */
 3012         }
 3013 }
 3014 
 3015 static void
 3016 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
 3017 {
 3018         struct msk_softc *sc;
 3019 
 3020         sc = sc_if->msk_softc;
 3021         if ((status & Y2_IS_PAR_RD1) != 0) {
 3022                 device_printf(sc_if->msk_if_dev,
 3023                     "RAM buffer read parity error\n");
 3024                 /* Clear IRQ. */
 3025                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3026                     RI_CLR_RD_PERR);
 3027         }
 3028         if ((status & Y2_IS_PAR_WR1) != 0) {
 3029                 device_printf(sc_if->msk_if_dev,
 3030                     "RAM buffer write parity error\n");
 3031                 /* Clear IRQ. */
 3032                 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
 3033                     RI_CLR_WR_PERR);
 3034         }
 3035         if ((status & Y2_IS_PAR_MAC1) != 0) {
 3036                 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
 3037                 /* Clear IRQ. */
 3038                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3039                     GMF_CLI_TX_PE);
 3040         }
 3041         if ((status & Y2_IS_PAR_RX1) != 0) {
 3042                 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
 3043                 /* Clear IRQ. */
 3044                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
 3045         }
 3046         if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
 3047                 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
 3048                 /* Clear IRQ. */
 3049                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
 3050         }
 3051 }
 3052 
 3053 static void
 3054 mskc_intr_hwerr(struct msk_softc *sc)
 3055 {
 3056         uint32_t status;
 3057         uint32_t tlphead[4];
 3058 
 3059         status = CSR_READ_4(sc, B0_HWE_ISRC);
 3060         /* Time Stamp timer overflow. */
 3061         if ((status & Y2_IS_TIST_OV) != 0)
 3062                 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
 3063         if ((status & Y2_IS_PCI_NEXP) != 0) {
 3064                 /*
 3065                  * A PCI Express error occurred which is not described in
 3066                  * the PEX spec.
 3067                  * This error is also mapped to either the Master Abort
 3068                  * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit
 3069                  * and can only be cleared there.
 3070                  */
 3071                 device_printf(sc->msk_dev,
 3072                     "PCI Express protocol violation error\n");
 3073         }
 3074 
 3075         if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
 3076                 uint16_t v16;
 3077 
 3078                 if ((status & Y2_IS_MST_ERR) != 0)
 3079                         device_printf(sc->msk_dev,
 3080                             "unexpected IRQ Master error\n");
 3081                 else
 3082                         device_printf(sc->msk_dev,
 3083                             "unexpected IRQ Status error\n");
 3084                 /* Reset all bits in the PCI status register. */
 3085                 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
 3086                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3087                 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
 3088                     PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
 3089                     PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
 3090                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3091         }
 3092 
 3093         /* Check for PCI Express Uncorrectable Error. */
 3094         if ((status & Y2_IS_PCI_EXP) != 0) {
 3095                 uint32_t v32;
 3096 
 3097                 /*
 3098                  * On a PCI Express bus, bridges are called root
 3099                  * complexes (RC). PCI Express errors are recognized by
 3100                  * the root complex too, which requests the system to
 3101                  * handle the problem. After an error occurs, it may no
 3102                  * longer be possible to access the adapter.
 3103                  */
 3104 
 3105                 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
 3106                 if ((v32 & PEX_UNSUP_REQ) != 0) {
 3107                         /* Ignore unsupported request error. */
 3108                         if (bootverbose) {
 3109                                 device_printf(sc->msk_dev,
 3110                                     "Uncorrectable PCI Express error\n");
 3111                         }
 3112                 }
 3113                 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
 3114                         int i;
 3115 
 3116                         /* Get TLP header from the Log Registers. */
 3117                         for (i = 0; i < 4; i++)
 3118                                 tlphead[i] = CSR_PCI_READ_4(sc,
 3119                                     PEX_HEADER_LOG + i * 4);
 3120                         /* Check for vendor defined broadcast message. */
 3121                         if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
 3122                                 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
 3123                                 CSR_WRITE_4(sc, B0_HWE_IMSK,
 3124                                     sc->msk_intrhwemask);
 3125                                 CSR_READ_4(sc, B0_HWE_IMSK);
 3126                         }
 3127                 }
 3128                 /* Clear the interrupt. */
 3129                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 3130                 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
 3131                 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 3132         }
 3133 
 3134         if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
 3135                 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
 3136         if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
 3137                 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
 3138 }
 3139 
 3140 static __inline void
 3141 msk_rxput(struct msk_if_softc *sc_if)
 3142 {
 3143         struct msk_softc *sc;
 3144 
 3145         sc = sc_if->msk_softc;
 3146 #ifdef MSK_JUMBO
 3147         if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
 3148                 bus_dmamap_sync(
 3149                     sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
 3150                     sc_if->msk_cdata.msk_jumbo_rx_ring_map,
 3151                     BUS_DMASYNC_PREWRITE);
 3152         }
 3153 #endif
 3154         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
 3155             PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
 3156 }
 3157 
 3158 static int
 3159 mskc_handle_events(struct msk_softc *sc)
 3160 {
 3161         struct msk_if_softc *sc_if;
 3162         int rxput[2];
 3163         struct msk_stat_desc *sd;
 3164         uint32_t control, status;
 3165         int cons, idx, len, port, rxprog;
 3166 
 3167         idx = CSR_READ_2(sc, STAT_PUT_IDX);
 3168         if (idx == sc->msk_stat_cons)
 3169                 return (0);
 3170 
 3171         rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
 3172 
 3173         rxprog = 0;
 3174         for (cons = sc->msk_stat_cons; cons != idx;) {
 3175                 sd = &sc->msk_stat_ring[cons];
 3176                 control = le32toh(sd->msk_control);
 3177                 if ((control & HW_OWNER) == 0)
 3178                         break;
 3179                 /*
 3180                  * Marvell's FreeBSD driver updates the status LE after
 3181                  * clearing HW_OWNER. However, the bus_dma(9) API has no
 3182                  * way to sync a single LE; it only provides a way to
 3183                  * sync an entire DMA map. So don't sync the LE until we
 3184                  * have a better way to sync LEs.
 3185                  */
 3186                 control &= ~HW_OWNER;
 3187                 sd->msk_control = htole32(control);
 3188                 status = le32toh(sd->msk_status);
 3189                 len = control & STLE_LEN_MASK;
 3190                 port = (control >> 16) & 0x01;
 3191                 sc_if = sc->msk_if[port];
 3192                 if (sc_if == NULL) {
 3193                         device_printf(sc->msk_dev, "invalid port opcode "
 3194                             "0x%08x\n", control & STLE_OP_MASK);
 3195                         continue;
 3196                 }
 3197 
 3198                 switch (control & STLE_OP_MASK) {
 3199                 case OP_RXVLAN:
 3200                         sc_if->msk_vtag = ntohs(len);
 3201                         break;
 3202                 case OP_RXCHKSVLAN:
 3203                         sc_if->msk_vtag = ntohs(len);
 3204                         break;
 3205                 case OP_RXSTAT:
 3206                         if ((sc_if->msk_ifp->if_flags & IFF_RUNNING) == 0)
 3207                                 break;
 3208 #ifdef MSK_JUMBO
 3209                         if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
 3210                                 msk_jumbo_rxeof(sc_if, status, len);
 3211                         else
 3212 #endif
 3213                                 msk_rxeof(sc_if, status, len);
 3214                         rxprog++;
 3215                         /*
 3216                          * Because there is no way to sync a single Rx LE,
 3217                          * put off the DMA sync operation until the end of
 3218                          * event processing.
 3219                          */
 3220                         rxput[port]++;
 3221                         /* Update prefetch unit if we've passed the watermark. */
 3222                         if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
 3223                                 msk_rxput(sc_if);
 3224                                 rxput[port] = 0;
 3225                         }
 3226                         break;
 3227                 case OP_TXINDEXLE:
 3228                         if (sc->msk_if[MSK_PORT_A] != NULL) {
 3229                                 msk_txeof(sc->msk_if[MSK_PORT_A],
 3230                                     status & STLE_TXA1_MSKL);
 3231                         }
 3232                         if (sc->msk_if[MSK_PORT_B] != NULL) {
 3233                                 msk_txeof(sc->msk_if[MSK_PORT_B],
 3234                                     ((status & STLE_TXA2_MSKL) >>
 3235                                     STLE_TXA2_SHIFTL) |
 3236                                     ((len & STLE_TXA2_MSKH) <<
 3237                                     STLE_TXA2_SHIFTH));
 3238                         }
 3239                         break;
 3240                 default:
 3241                         device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
 3242                             control & STLE_OP_MASK);
 3243                         break;
 3244                 }
 3245                 MSK_INC(cons, MSK_STAT_RING_CNT);
 3246                 if (rxprog > sc->msk_process_limit)
 3247                         break;
 3248         }
 3249 
 3250         sc->msk_stat_cons = cons;
 3251         /* XXX We should sync status LEs here. See above notes. */
 3252 
 3253         if (rxput[MSK_PORT_A] > 0)
 3254                 msk_rxput(sc->msk_if[MSK_PORT_A]);
 3255         if (rxput[MSK_PORT_B] > 0)
 3256                 msk_rxput(sc->msk_if[MSK_PORT_B]);
 3257 
 3258         return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
 3259 }
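
/*
 * A condensed sketch of the event loop above: consume status list
 * elements only while the chip has handed them over (HW_OWNER set),
 * batch Rx doorbell writes behind a watermark, and flush the remainder
 * at the end.  TOY_STAT_CNT, TOY_PUT_WM and toy_ring_doorbell() are
 * illustrative assumptions; TOY_HW_OWNER comes from the encap sketch
 * earlier.
 */
#if 0   /* illustration only; not compiled into the driver */
#define TOY_STAT_CNT    16
#define TOY_PUT_WM      4

static void
toy_ring_doorbell(void)
{
        /* The real driver writes the prefetch unit PUT index here. */
}

static int
toy_handle_events(uint32_t *stat_ring, int cons, int put_idx)
{
        int pending = 0;

        while (cons != put_idx) {
                if ((stat_ring[cons] & TOY_HW_OWNER) == 0)
                        break;                  /* chip is not done yet */
                stat_ring[cons] &= ~TOY_HW_OWNER;
                if (++pending >= TOY_PUT_WM) {
                        toy_ring_doorbell();    /* batched Rx refill */
                        pending = 0;
                }
                cons = (cons + 1) % TOY_STAT_CNT;
        }
        if (pending > 0)
                toy_ring_doorbell();            /* flush the remainder */
        return (cons);
}
#endif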
 3260 
 3261 /* Legacy interrupt handler for shared interrupt. */
 3262 static void
 3263 mskc_intr(void *xsc)
 3264 {
 3265         struct msk_softc *sc;
 3266         struct msk_if_softc *sc_if0, *sc_if1;
 3267         struct ifnet *ifp0, *ifp1;
 3268         uint32_t status;
 3269 
 3270         sc = xsc;
 3271         ASSERT_SERIALIZED(&sc->msk_serializer);
 3272 
 3273         /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
 3274         status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
 3275         if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
 3276             (status & sc->msk_intrmask) == 0) {
 3277                 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3278                 return;
 3279         }
 3280 
 3281         sc_if0 = sc->msk_if[MSK_PORT_A];
 3282         sc_if1 = sc->msk_if[MSK_PORT_B];
 3283         ifp0 = ifp1 = NULL;
 3284         if (sc_if0 != NULL)
 3285                 ifp0 = sc_if0->msk_ifp;
 3286         if (sc_if1 != NULL)
 3287                 ifp1 = sc_if1->msk_ifp;
 3288 
 3289         if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
 3290                 msk_intr_phy(sc_if0);
 3291         if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
 3292                 msk_intr_phy(sc_if1);
 3293         if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
 3294                 msk_intr_gmac(sc_if0);
 3295         if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
 3296                 msk_intr_gmac(sc_if1);
 3297         if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
 3298                 device_printf(sc->msk_dev, "Rx descriptor error\n");
 3299                 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
 3300                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3301                 CSR_READ_4(sc, B0_IMSK);
 3302         }
 3303         if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
 3304                 device_printf(sc->msk_dev, "Tx descriptor error\n");
 3305                 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
 3306                 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3307                 CSR_READ_4(sc, B0_IMSK);
 3308         }
 3309         if ((status & Y2_IS_HW_ERR) != 0)
 3310                 mskc_intr_hwerr(sc);
 3311 
 3312         while (mskc_handle_events(sc) != 0)
 3313                 ;
 3314         if ((status & Y2_IS_STAT_BMU) != 0)
 3315                 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
 3316 
 3317         /* Reenable interrupts. */
 3318         CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
 3319 
 3320         if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 &&
 3321             !ifq_is_empty(&ifp0->if_snd))
 3322                 if_devstart(ifp0);
 3323         if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 &&
 3324             !ifq_is_empty(&ifp1->if_snd))
 3325                 if_devstart(ifp1);
 3326 }
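
/*
 * A minimal sketch of the legacy interrupt discipline used above: the
 * read of the special interrupt source register implicitly masks
 * further interrupts, so the handler body runs without re-entry, and
 * the final ICR write re-arms them.  The raw register pointers stand in
 * for the driver's CSR accessors and are assumptions for illustration.
 */
#if 0   /* illustration only; not compiled into the driver */
static void
toy_intr(volatile uint32_t *isrc2, volatile uint32_t *icr, uint32_t imask)
{
        uint32_t status;

        status = *isrc2;        /* the read also masks further interrupts */
        if (status != 0 && status != 0xffffffff && (status & imask) != 0) {
                /* ... dispatch PHY, MAC, hwerr and event handlers ... */
        }
        *icr = 2;               /* re-enable interrupts */
}
#endif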
 3327 
 3328 static void
 3329 msk_set_tx_stfwd(struct msk_if_softc *sc_if)
 3330 {
 3331         struct msk_softc *sc = sc_if->msk_softc;
 3332         struct ifnet *ifp = sc_if->msk_ifp;
 3333 
 3334         if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
 3335             sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
 3336             sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
 3337                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3338                     TX_STFW_ENA);
 3339         } else {
 3340                 if (ifp->if_mtu > ETHERMTU) {
 3341                         /* Set Tx GMAC FIFO Almost Empty Threshold. */
 3342                         CSR_WRITE_4(sc,
 3343                             MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
 3344                             MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
 3345                         /* Disable Store & Forward mode for Tx. */
 3346                         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3347                             TX_STFW_DIS);
 3348                 } else {
 3349                         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
 3350                             TX_STFW_ENA);
 3351                 }
 3352         }
 3353 }
 3354 
 3355 static void
 3356 msk_init(void *xsc)
 3357 {
 3358         struct msk_if_softc *sc_if = xsc;
 3359         struct msk_softc *sc = sc_if->msk_softc;
 3360         struct ifnet *ifp = sc_if->msk_ifp;
 3361         struct mii_data  *mii;
 3362         uint16_t eaddr[ETHER_ADDR_LEN / 2];
 3363         uint16_t gmac;
 3364         uint32_t reg;
 3365         int error, i;
 3366 
 3367         ASSERT_SERIALIZED(ifp->if_serializer);
 3368 
 3369         mii = device_get_softc(sc_if->msk_miibus);
 3370 
 3371         error = 0;
 3372         /* Cancel pending I/O and free all Rx/Tx buffers. */
 3373         msk_stop(sc_if);
 3374 
 3375         sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
 3376         if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
 3377             sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
 3378                 /*
 3379                  * In Yukon EC Ultra, TSO & checksum offload is not
 3380                  * supported for jumbo frames.
 3381                  */
 3382                 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
 3383                 ifp->if_capenable &= ~IFCAP_TXCSUM;
 3384         }
 3385 
 3386         /* GMAC Control reset. */
 3387         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
 3388         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
 3389         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
 3390         if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 3391             sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 3392                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
 3393                     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
 3394                     GMC_BYP_RETR_ON);
 3395         }
 3396 
 3397         /*
 3398          * Initialize GMAC first such that speed/duplex/flow-control
 3399          * parameters are renegotiated when the interface is brought up.
 3400          */
 3401         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
 3402 
 3403         /* Dummy read the Interrupt Source Register. */
 3404         CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
 3405 
 3406         /* Set MIB Clear Counter Mode. */
 3407         gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
 3408         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
 3409         /* Read all MIB Counters with Clear Mode set. */
 3410         for (i = 0; i < GM_MIB_CNT_SIZE; i++)
 3411                 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
 3412         /* Clear MIB Clear Counter Mode. */
 3413         gmac &= ~GM_PAR_MIB_CLR;
 3414         GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
 3415 
 3416         /* Disable FCS. */
 3417         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
 3418 
 3419         /* Setup Transmit Control Register. */
 3420         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
 3421 
 3422         /* Setup Transmit Flow Control Register. */
 3423         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
 3424 
 3425         /* Setup Transmit Parameter Register. */
 3426         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
 3427             TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
 3428             TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
 3429 
 3430         gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
 3431             GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
 3432 
 3433         if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
 3434                 gmac |= GM_SMOD_JUMBO_ENA;
 3435         GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
 3436 
 3437         /* Set station address. */
 3438         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
 3439         for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
 3440                 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
 3441                     eaddr[i]);
 3442         for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
 3443                 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
 3444                     eaddr[i]);
 3445 
 3446         /* Disable interrupts for counter overflows. */
 3447         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
 3448         GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
 3449         GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
 3450 
 3451         /* Configure Rx MAC FIFO. */
 3452         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 3453         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
 3454         reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
 3455         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
 3456             sc->msk_hw_id == CHIP_ID_YUKON_EX)
 3457                 reg |= GMF_RX_OVER_ON;
 3458         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
 3459 
 3460         /* Set receive filter. */
 3461         msk_rxfilter(sc_if);
 3462 
 3463         if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
 3464                 /* Clear flush mask - HW bug. */
 3465                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
 3466         } else {
 3467                 /* Flush Rx MAC FIFO on any flow control or error. */
 3468                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
 3469                     GMR_FS_ANY_ERR);
 3470         }
 3471 
 3472         /*
 3473          * Set the Rx FIFO flush threshold to 64 bytes + 1 FIFO word,
 3474          * due to a hardware hang on receipt of pause frames.
 3475          */
 3476         reg = RX_GMF_FL_THR_DEF + 1;
 3477         /* Another magic value for Yukon FE+, from Linux. */
 3478         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3479             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
 3480                 reg = 0x178;
 3481         CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
 3482 
 3483 
 3484         /* Configure Tx MAC FIFO. */
 3485         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 3486         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
 3487         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
 3488 
 3489         /* Configure hardware VLAN tag insertion/stripping. */
 3490         msk_setvlan(sc_if, ifp);
 3491 
 3492         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
 3493                 /* Set the Rx Pause thresholds. */
 3494                 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
 3495                     MSK_ECU_LLPP);
 3496                 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
 3497                     MSK_ECU_ULPP);
 3498                 /* Configure store-and-forward for Tx. */
 3499                 msk_set_tx_stfwd(sc_if);
 3500         }
 3501 
 3502         if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
 3503             sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
 3504                 /* Disable dynamic watermark - from Linux. */
 3505                 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
 3506                 reg &= ~0x03;
 3507                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
 3508         }
 3509 
 3510         /*
 3511          * Disable Force Sync bit and Alloc bit in Tx RAM interface
 3512          * arbiter as we don't use Sync Tx queue.
 3513          */
 3514         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
 3515             TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
 3516         /* Enable the RAM Interface Arbiter. */
 3517         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
 3518 
 3519         /* Setup RAM buffer. */
 3520         msk_set_rambuffer(sc_if);
 3521 
 3522         /* Disable Tx sync Queue. */
 3523         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
 3524 
 3525         /* Setup Tx Queue Bus Memory Interface. */
 3526         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
 3527         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
 3528         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
 3529         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
 3530         switch (sc->msk_hw_id) {
 3531         case CHIP_ID_YUKON_EC_U:
 3532                 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
 3533                         /* Fix for Yukon-EC Ultra: set BMU FIFO level */
 3534                         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
 3535                             MSK_ECU_TXFF_LEV);
 3536                 }
 3537                 break;
 3538         case CHIP_ID_YUKON_EX:
 3539                 /*
 3540                  * Yukon Extreme seems to have a silicon bug in its
 3541                  * automatic Tx checksum calculation capability.
 3542                  */
 3543                 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) {
 3544                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
 3545                             F_TX_CHK_AUTO_OFF);
 3546                 }
 3547                 break;
 3548         }
 3549 
 3550         /* Setup Rx Queue Bus Memory Interface. */
 3551         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
 3552         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
 3553         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
 3554         CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
 3555         if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
 3556             sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
 3557                 /* MAC Rx RAM Read is controlled by hardware. */
 3558                 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
 3559         }
 3560 
 3561         msk_set_prefetch(sc, sc_if->msk_txq,
 3562             sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
 3563         msk_init_tx_ring(sc_if);
 3564 
 3565         /* Disable Rx checksum offload and RSS hash. */
 3566         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
 3567             BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
 3568 #ifdef MSK_JUMBO
 3569         if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
 3570                 msk_set_prefetch(sc, sc_if->msk_rxq,
 3571                     sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
 3572                     MSK_JUMBO_RX_RING_CNT - 1);
 3573                 error = msk_init_jumbo_rx_ring(sc_if);
 3574         } else
 3575 #endif
 3576         {
 3577                 msk_set_prefetch(sc, sc_if->msk_rxq,
 3578                     sc_if->msk_rdata.msk_rx_ring_paddr,
 3579                     MSK_RX_RING_CNT - 1);
 3580                 error = msk_init_rx_ring(sc_if);
 3581         }
 3582         if (error != 0) {
 3583                 device_printf(sc_if->msk_if_dev,
 3584                     "initialization failed: no memory for Rx buffers\n");
 3585                 msk_stop(sc_if);
 3586                 return;
 3587         }
 3588         if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
 3589             sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
 3590                 /* Disable flushing of non-ASF packets. */
 3591                 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
 3592                     GMF_RX_MACSEC_FLUSH_OFF);
 3593         }
 3594 
 3595         /* Configure interrupt handling. */
 3596         if (sc_if->msk_port == MSK_PORT_A) {
 3597                 sc->msk_intrmask |= Y2_IS_PORT_A;
 3598                 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
 3599         } else {
 3600                 sc->msk_intrmask |= Y2_IS_PORT_B;
 3601                 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
 3602         }
 3603         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 3604         CSR_READ_4(sc, B0_HWE_IMSK);
 3605         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3606         CSR_READ_4(sc, B0_IMSK);
 3607 
 3608         sc_if->msk_link = 0;
 3609         mii_mediachg(mii);
 3610 
 3611         mskc_set_imtimer(sc);
 3612 
 3613         ifp->if_flags |= IFF_RUNNING;
 3614         ifq_clr_oactive(&ifp->if_snd);
 3615 
 3616         callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
 3617 }
 3618 
 3619 static void
 3620 msk_set_rambuffer(struct msk_if_softc *sc_if)
 3621 {
 3622         struct msk_softc *sc;
 3623         int ltpp, utpp;
 3624 
 3625         if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
 3626                 return;
 3627 
 3628         sc = sc_if->msk_softc;
 3629 
 3630         /* Setup Rx Queue. */
 3631         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
 3632         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
 3633             sc->msk_rxqstart[sc_if->msk_port] / 8);
 3634         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
 3635             sc->msk_rxqend[sc_if->msk_port] / 8);
 3636         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
 3637             sc->msk_rxqstart[sc_if->msk_port] / 8);
 3638         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
 3639             sc->msk_rxqstart[sc_if->msk_port] / 8);
 3640 
 3641         utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 3642             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
 3643         ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
 3644             sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
 3645         if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
 3646                 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
 3647         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
 3648         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
 3649         /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
 3650 
 3651         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
 3652         CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
 3653 
 3654         /* Setup Tx Queue. */
 3655         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
 3656         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
 3657             sc->msk_txqstart[sc_if->msk_port] / 8);
 3658         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
 3659             sc->msk_txqend[sc_if->msk_port] / 8);
 3660         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
 3661             sc->msk_txqstart[sc_if->msk_port] / 8);
 3662         CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
 3663             sc->msk_txqstart[sc_if->msk_port] / 8);
 3664         /* Enable Store & Forward for Tx side. */
 3665         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
 3666         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
 3667         CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
 3668 }
 3669 
 3670 static void
 3671 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
 3672     uint32_t count)
 3673 {
 3674 
 3675         /* Reset the prefetch unit. */
 3676         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 3677             PREF_UNIT_RST_SET);
 3678         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 3679             PREF_UNIT_RST_CLR);
 3680         /* Set LE base address. */
 3681         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
 3682             MSK_ADDR_LO(addr));
 3683         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
 3684             MSK_ADDR_HI(addr));
 3685         /* Set the list last index. */
 3686         CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
 3687             count);
 3688         /* Turn on prefetch unit. */
 3689         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
 3690             PREF_UNIT_OP_ON);
 3691         /* Dummy read to ensure write. */
 3692         CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
 3693 }
 3694 
 3695 static void
 3696 msk_stop(struct msk_if_softc *sc_if)
 3697 {
 3698         struct msk_softc *sc = sc_if->msk_softc;
 3699         struct ifnet *ifp = sc_if->msk_ifp;
 3700         struct msk_txdesc *txd;
 3701         struct msk_rxdesc *rxd;
 3702 #ifdef MSK_JUMBO
 3703         struct msk_rxdesc *jrxd;
 3704 #endif
 3705         uint32_t val;
 3706         int i;
 3707 
 3708         ASSERT_SERIALIZED(ifp->if_serializer);
 3709 
 3710         callout_stop(&sc_if->msk_tick_ch);
 3711         ifp->if_timer = 0;
 3712 
 3713         /* Disable interrupts. */
 3714         if (sc_if->msk_port == MSK_PORT_A) {
 3715                 sc->msk_intrmask &= ~Y2_IS_PORT_A;
 3716                 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
 3717         } else {
 3718                 sc->msk_intrmask &= ~Y2_IS_PORT_B;
 3719                 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
 3720         }
 3721         CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
 3722         CSR_READ_4(sc, B0_HWE_IMSK);
 3723         CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
 3724         CSR_READ_4(sc, B0_IMSK);
 3725 
 3726         /* Disable Tx/Rx MAC. */
 3727         val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 3728         val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
 3729         GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
 3730         /* Read again to ensure writing. */
 3731         GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
 3732 
 3733         /* Stop Tx BMU. */
 3734         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
 3735         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 3736         for (i = 0; i < MSK_TIMEOUT; i++) {
 3737                 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
 3738                         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 3739                             BMU_STOP);
 3740                         val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
 3741                 } else
 3742                         break;
 3743                 DELAY(1);
 3744         }
 3745         if (i == MSK_TIMEOUT)
 3746                 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
 3747         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
 3748             RB_RST_SET | RB_DIS_OP_MD);
 3749 
 3750         /* Disable all GMAC interrupts. */
 3751         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
 3752         /* Disable PHY interrupt. */
 3753         msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
 3754 
 3755         /* Disable the RAM Interface Arbiter. */
 3756         CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
 3757 
 3758         /* Reset the PCI FIFO of the async Tx queue. */
 3759         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
 3760             BMU_RST_SET | BMU_FIFO_RST);
 3761 
 3762         /* Reset the Tx prefetch units. */
 3763         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
 3764             PREF_UNIT_RST_SET);
 3765 
 3766         /* Reset the RAM Buffer async Tx queue. */
 3767         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
 3768 
 3769         /* Reset Tx MAC FIFO. */
 3770         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
 3771         /* Set Pause Off. */
 3772         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
 3773 
 3774         /*
 3775          * The Rx Stop command will not work for Yukon-2 if the BMU does
 3776          * not reach the end of packet, and since we can't be sure whether
 3777          * we have incoming data, we must reset the BMU while it is not in
 3778          * the middle of a DMA transfer. Since it is possible that the Rx
 3779          * path is still active, the Rx RAM buffer is stopped first, so any
 3780          * possible incoming data will not trigger a DMA. After the RAM
 3781          * buffer is stopped, the BMU is polled until any DMA in progress
 3782          * has ended, and only then is it reset.
 3783          */
 3784 
 3785         /* Disable the RAM Buffer receive queue. */
 3786         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
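        /*
         * Added note: the loop below waits for the RAM buffer's read
         * level and its shadow to agree; once they match, no DMA
         * transfer is in flight and the BMU can safely be reset.
         */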
 3787         for (i = 0; i < MSK_TIMEOUT; i++) {
 3788                 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
 3789                     CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
 3790                         break;
 3791                 DELAY(1);
 3792         }
 3793         if (i == MSK_TIMEOUT)
 3794                 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
 3795         CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
 3796             BMU_RST_SET | BMU_FIFO_RST);
 3797         /* Reset the Rx prefetch unit. */
 3798         CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
 3799             PREF_UNIT_RST_SET);
 3800         /* Reset the RAM Buffer receive queue. */
 3801         CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
 3802         /* Reset Rx MAC FIFO. */
 3803         CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
 3804 
 3805         /* Free Rx and Tx mbufs still in the queues. */
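        /*
         * Added note: each DMA map is unloaded before m_freem() so
         * that no device mapping is left pointing at mbufs being
         * returned to the pool.
         */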
 3806         for (i = 0; i < MSK_RX_RING_CNT; i++) {
 3807                 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
 3808                 if (rxd->rx_m != NULL) {
 3809                         bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
 3810                             rxd->rx_dmamap);
 3811                         m_freem(rxd->rx_m);
 3812                         rxd->rx_m = NULL;
 3813                 }
 3814         }
 3815 #ifdef MSK_JUMBO
 3816         for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
 3817                 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
 3818                 if (jrxd->rx_m != NULL) {
 3819                         bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
 3820                             jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
 3821                         bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
 3822                             jrxd->rx_dmamap);
 3823                         m_freem(jrxd->rx_m);
 3824                         jrxd->rx_m = NULL;
 3825                 }
 3826         }
 3827 #endif
 3828         for (i = 0; i < MSK_TX_RING_CNT; i++) {
 3829                 txd = &sc_if->msk_cdata.msk_txdesc[i];
 3830                 if (txd->tx_m != NULL) {
 3831                         bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
 3832                             txd->tx_dmamap);
 3833                         m_freem(txd->tx_m);
 3834                         txd->tx_m = NULL;
 3835                 }
 3836         }
 3837 
 3838         /*
 3839          * Mark the interface down.
 3840          */
 3841         ifp->if_flags &= ~IFF_RUNNING;
 3842         ifq_clr_oactive(&ifp->if_snd);
 3843         sc_if->msk_link = 0;
 3844 }
 3845 
 3846 static int
 3847 mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
 3848 {
 3849         return sysctl_int_range(oidp, arg1, arg2, req,
 3850                                 MSK_PROC_MIN, MSK_PROC_MAX);
 3851 }
 3852 
 3853 static int
 3854 mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
 3855 {
 3856         struct msk_softc *sc = arg1;
 3857         struct lwkt_serialize *serializer = &sc->msk_serializer;
 3858         int error = 0, v;
 3859 
 3860         lwkt_serialize_enter(serializer);
 3861 
 3862         v = sc->msk_intr_rate;
 3863         error = sysctl_handle_int(oidp, &v, 0, req);
 3864         if (error || req->newptr == NULL)
 3865                 goto back;
 3866         if (v < 0) {
 3867                 error = EINVAL;
 3868                 goto back;
 3869         }
 3870 
 3871         if (sc->msk_intr_rate != v) {
 3872                 int flag = 0, i;
 3873 
 3874                 sc->msk_intr_rate = v;
 3875                 for (i = 0; i < 2; ++i) {
 3876                         if (sc->msk_if[i] != NULL) {
 3877                                 flag |= sc->msk_if[i]->
 3878                                         arpcom.ac_if.if_flags & IFF_RUNNING;
 3879                         }
 3880                 }
 3881                 if (flag)
 3882                         mskc_set_imtimer(sc);
 3883         }
 3884 back:
 3885         lwkt_serialize_exit(serializer);
 3886         return error;
 3887 }
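
/*
 * For reference, a minimal sketch of how these two handlers might be
 * registered at attach time via SYSCTL_ADD_PROC().  The sysctl
 * context, tree, and node names below are illustrative assumptions,
 * not taken from this file.
 */
#if 0   /* example only */
        SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
            SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
            "process_limit", CTLTYPE_INT | CTLFLAG_RW,
            &sc->msk_process_limit, 0, mskc_sysctl_proc_limit, "I",
            "max number of Rx events to process per interrupt");
        SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
            SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
            "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
            sc, 0, mskc_sysctl_intr_rate, "I",
            "max number of interrupts per second (0 stops moderation)");
#endif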
 3888 
 3889 static int
 3890 msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
 3891                   void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
 3892 {
 3893         struct msk_if_softc *sc_if = device_get_softc(dev);
 3894         bus_dmamem_t dmem;
 3895         int error;
 3896 
 3897         error = bus_dmamem_coherent(sc_if->msk_cdata.msk_parent_tag,
 3898                         MSK_RING_ALIGN, 0,
 3899                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 3900                         size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
 3901         if (error) {
 3902                 device_printf(dev, "can't create coherent DMA memory\n");
 3903                 return error;
 3904         }
 3905 
 3906         *dtag = dmem.dmem_tag;
 3907         *dmap = dmem.dmem_map;
 3908         *addr = dmem.dmem_addr;
 3909         *paddr = dmem.dmem_busaddr;
 3910 
 3911         return 0;
 3912 }
 3913 
 3914 static void
 3915 msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
 3916 {
 3917         if (dtag != NULL) {
 3918                 bus_dmamap_unload(dtag, dmap);
 3919                 bus_dmamem_free(dtag, addr, dmap);
 3920                 bus_dma_tag_destroy(dtag);
 3921         }
 3922 }
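
/*
 * A sketch of a typical caller pairing these helpers (the ring size
 * and softc field names here are assumptions modeled on the rest of
 * the driver): allocate the Tx descriptor ring at attach time and
 * release it again at detach time.
 */
#if 0   /* example only */
        /* attach path */
        error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
            &sc_if->msk_cdata.msk_tx_ring_tag,
            (void **)&sc_if->msk_rdata.msk_tx_ring,
            &sc_if->msk_rdata.msk_tx_ring_paddr,
            &sc_if->msk_cdata.msk_tx_ring_map);

        /* detach path */
        msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
            sc_if->msk_rdata.msk_tx_ring,
            sc_if->msk_cdata.msk_tx_ring_map);
#endif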
 3923 
 3924 static void
 3925 mskc_set_imtimer(struct msk_softc *sc)
 3926 {
 3927         if (sc->msk_intr_rate > 0) {
 3928                 /*
 3929                  * XXX myk(4) seems to use 125MHz for EC/FE/XL and
 3930                  *     78.125MHz for the rest of the chip types.
 3931                  */
 3932                 CSR_WRITE_4(sc, B2_IRQM_INI,
 3933                             MSK_USECS(sc, 1000000 / sc->msk_intr_rate));
 3934                 CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
 3935                 CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START);
 3936         } else {
 3937                 CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP);
 3938         }
 3939 }
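
/*
 * Worked example (rate chosen for illustration): with msk_intr_rate
 * set to 4000 interrupts/s, the moderation period programmed above is
 * 1000000 / 4000 = 250us.  MSK_USECS() is then expected to scale that
 * to core-clock ticks, e.g. 250 * 125 = 31250 ticks on a 125MHz
 * EC/FE/XL part per the XXX note above, before the value is loaded
 * into B2_IRQM_INI.
 */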
