FreeBSD/Linux Kernel Cross Reference
sys/dev/dwc/if_dwc.c


    1 /*-
    2  * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
    3  * All rights reserved.
    4  *
    5  * This software was developed by SRI International and the University of
    6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
    7  * ("CTSRD"), as part of the DARPA CRASH research programme.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  */
   30 
   31 /*
   32  * Ethernet media access controller (EMAC)
   33  * Chapter 17, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
   34  *
   35  * EMAC is an instance of the Synopsys DesignWare 3504-0
   36  * Universal 10/100/1000 Ethernet MAC (DWC_gmac).
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD$");
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/bus.h>
   45 #include <sys/gpio.h>
   46 #include <sys/kernel.h>
   47 #include <sys/lock.h>
   48 #include <sys/malloc.h>
   49 #include <sys/mbuf.h>
   50 #include <sys/module.h>
   51 #include <sys/mutex.h>
   52 #include <sys/rman.h>
   53 #include <sys/socket.h>
   54 #include <sys/sockio.h>
   55 
   56 #include <net/bpf.h>
   57 #include <net/if.h>
   58 #include <net/ethernet.h>
   59 #include <net/if_dl.h>
   60 #include <net/if_media.h>
   61 #include <net/if_types.h>
   62 #include <net/if_var.h>
   63 
   64 #include <machine/bus.h>
   65 
   66 #include <dev/dwc/if_dwc.h>
   67 #include <dev/dwc/if_dwcvar.h>
   68 #include <dev/mii/mii.h>
   69 #include <dev/mii/miivar.h>
   70 #include <dev/ofw/ofw_bus.h>
   71 #include <dev/ofw/ofw_bus_subr.h>
   72 #include <dev/mii/mii_fdt.h>
   73 
   74 #include <dev/extres/clk/clk.h>
   75 #include <dev/extres/hwreset/hwreset.h>
   76 
   77 #include "if_dwc_if.h"
   78 #include "gpio_if.h"
   79 #include "miibus_if.h"
   80 
   81 #define READ4(_sc, _reg) \
   82         bus_read_4((_sc)->res[0], _reg)
   83 #define WRITE4(_sc, _reg, _val) \
   84         bus_write_4((_sc)->res[0], _reg, _val)
   85 
   86 #define MAC_RESET_TIMEOUT       100
   87 #define WATCHDOG_TIMEOUT_SECS   5
   88 #define STATS_HARVEST_INTERVAL  2
   89 
   90 #define DWC_LOCK(sc)                    mtx_lock(&(sc)->mtx)
   91 #define DWC_UNLOCK(sc)                  mtx_unlock(&(sc)->mtx)
   92 #define DWC_ASSERT_LOCKED(sc)           mtx_assert(&(sc)->mtx, MA_OWNED)
   93 #define DWC_ASSERT_UNLOCKED(sc)         mtx_assert(&(sc)->mtx, MA_NOTOWNED)
   94 
   95 /* TX descriptors - TDESC0 is almost unified */
   96 #define TDESC0_OWN              (1U << 31)
   97 #define TDESC0_IHE              (1U << 16)      /* IP Header Error */
   98 #define TDESC0_ES               (1U << 15)      /* Error Summary */
   99 #define TDESC0_JT               (1U << 14)      /* Jabber Timeout */
  100 #define TDESC0_FF               (1U << 13)      /* Frame Flushed */
  101 #define TDESC0_PCE              (1U << 12)      /* Payload Checksum Error */
  102 #define TDESC0_LOC              (1U << 11)      /* Loss of Carrier */
  103 #define TDESC0_NC               (1U << 10)      /* No Carrier */
  104 #define TDESC0_LC               (1U <<  9)      /* Late Collision */
  105 #define TDESC0_EC               (1U <<  8)      /* Excessive Collision */
  106 #define TDESC0_VF               (1U <<  7)      /* VLAN Frame */
  107 #define TDESC0_CC_MASK          0xf
  108 #define TDESC0_CC_SHIFT         3               /* Collision Count */
  109 #define TDESC0_ED               (1U <<  2)      /* Excessive Deferral */
  110 #define TDESC0_UF               (1U <<  1)      /* Underflow Error */
  111 #define TDESC0_DB               (1U <<  0)      /* Deferred Bit */
  112 /* TX descriptors - TDESC0 extended format only */
  113 #define ETDESC0_IC              (1U << 30)      /* Interrupt on Completion */
  114 #define ETDESC0_LS              (1U << 29)      /* Last Segment */
  115 #define ETDESC0_FS              (1U << 28)      /* First Segment */
  116 #define ETDESC0_DC              (1U << 27)      /* Disable CRC */
  117 #define ETDESC0_DP              (1U << 26)      /* Disable Padding */
  118 #define ETDESC0_CIC_NONE        (0U << 22)      /* Checksum Insertion Control */
  119 #define ETDESC0_CIC_HDR         (1U << 22)
  120 #define ETDESC0_CIC_SEG         (2U << 22)
  121 #define ETDESC0_CIC_FULL        (3U << 22)
  122 #define ETDESC0_TER             (1U << 21)      /* Transmit End of Ring */
  123 #define ETDESC0_TCH             (1U << 20)      /* Second Address Chained */
  124 
  125 /* TX descriptors - TDESC1 normal format */
  126 #define NTDESC1_IC              (1U << 31)      /* Interrupt on Completion */
  127 #define NTDESC1_LS              (1U << 30)      /* Last Segment */
  128 #define NTDESC1_FS              (1U << 29)      /* First Segment */
  129 #define NTDESC1_CIC_NONE        (0U << 27)      /* Checksum Insertion Control */
  130 #define NTDESC1_CIC_HDR         (1U << 27)
  131 #define NTDESC1_CIC_SEG         (2U << 27)
  132 #define NTDESC1_CIC_FULL        (3U << 27)
  133 #define NTDESC1_DC              (1U << 26)      /* Disable CRC */
  134 #define NTDESC1_TER             (1U << 25)      /* Transmit End of Ring */
  135 #define NTDESC1_TCH             (1U << 24)      /* Second Address Chained */
  136 /* TX descriptors - TDESC1 extended format */
  137 #define ETDESC1_DP              (1U << 23)      /* Disable Padding */
  138 #define ETDESC1_TBS2_MASK       0x7ff
  139 #define ETDESC1_TBS2_SHIFT      11              /* Transmit Buffer 2 Size */
  140 #define ETDESC1_TBS1_MASK       0x7ff
  141 #define ETDESC1_TBS1_SHIFT      0               /* Transmit Buffer 1 Size */
  142 
  143 /* RX descriptor - RDESC0 is unified */
  144 #define RDESC0_OWN              (1U << 31)
  145 #define RDESC0_AFM              (1U << 30)      /* Dest. Address Filter Fail */
  146 #define RDESC0_FL_MASK          0x3fff
  147 #define RDESC0_FL_SHIFT         16              /* Frame Length */
  148 #define RDESC0_ES               (1U << 15)      /* Error Summary */
  149 #define RDESC0_DE               (1U << 14)      /* Descriptor Error */
  150 #define RDESC0_SAF              (1U << 13)      /* Source Address Filter Fail */
  151 #define RDESC0_LE               (1U << 12)      /* Length Error */
  152 #define RDESC0_OE               (1U << 11)      /* Overflow Error */
  153 #define RDESC0_VLAN             (1U << 10)      /* VLAN Tag */
  154 #define RDESC0_FS               (1U <<  9)      /* First Descriptor */
  155 #define RDESC0_LS               (1U <<  8)      /* Last Descriptor */
  156 #define RDESC0_ICE              (1U <<  7)      /* IPC Checksum Error */
  157 #define RDESC0_LC               (1U <<  6)      /* Late Collision */
  158 #define RDESC0_FT               (1U <<  5)      /* Frame Type */
  159 #define RDESC0_RWT              (1U <<  4)      /* Receive Watchdog Timeout */
  160 #define RDESC0_RE               (1U <<  3)      /* Receive Error */
  161 #define RDESC0_DBE              (1U <<  2)      /* Dribble Bit Error */
  162 #define RDESC0_CE               (1U <<  1)      /* CRC Error */
  163 #define RDESC0_PCE              (1U <<  0)      /* Payload Checksum Error */
  164 #define RDESC0_RXMA             (1U <<  0)      /* Rx MAC Address */
  165 
  166 /* RX descriptors - RDESC1 normal format */
  167 #define NRDESC1_DIC             (1U << 31)      /* Disable Intr on Completion */
  168 #define NRDESC1_RER             (1U << 25)      /* Receive End of Ring */
  169 #define NRDESC1_RCH             (1U << 24)      /* Second Address Chained */
  170 #define NRDESC1_RBS2_MASK       0x7ff
  171 #define NRDESC1_RBS2_SHIFT      11              /* Receive Buffer 2 Size */
  172 #define NRDESC1_RBS1_MASK       0x7ff
  173 #define NRDESC1_RBS1_SHIFT      0               /* Receive Buffer 1 Size */
  174 
  175 /* RX descriptors - RDESC1 enhanced format */
  176 #define ERDESC1_DIC             (1U << 31)      /* Disable Intr on Completion */
  177 #define ERDESC1_RBS2_MASK       0x7ffff
  178 #define ERDESC1_RBS2_SHIFT      16              /* Receive Buffer 2 Size */
  179 #define ERDESC1_RER             (1U << 15)      /* Receive End of Ring */
  180 #define ERDESC1_RCH             (1U << 14)      /* Second Address Chained */
  181 #define ERDESC1_RBS1_MASK       0x7ffff
  182 #define ERDESC1_RBS1_SHIFT      0               /* Receive Buffer 1 Size */
  183 
  184 /*
  185  * A hardware buffer descriptor.  Rx and Tx buffers have the same descriptor
  186  * layout, but the bits in the fields have different meanings.
  187  */
  188 struct dwc_hwdesc
  189 {
  190         uint32_t desc0;
  191         uint32_t desc1;
  192         uint32_t addr1;         /* ptr to first buffer data */
  193         uint32_t addr2;         /* ptr to next descriptor / second buffer data*/
  194 };
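      /*
       * Both rings are run in "chained" mode: the setup code (setup_dma(),
       * dwc_setup_txdesc(), dwc_setup_rxdesc()) sets the TCH/RCH bits and
       * points addr2 at the physical address of the next descriptor, so
       * addr2 never carries a second data buffer in this driver.
       */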
  195 
  196 
  197 struct dwc_hash_maddr_ctx {
  198         struct dwc_softc *sc;
  199         uint32_t hash[8];
  200 };
  201 
  202 /*
  203  * The hardware imposes alignment restrictions on various objects involved in
  204  * DMA transfers.  These values are expressed in bytes (not bits).
  205  */
  206 #define DWC_DESC_RING_ALIGN     2048
  207 
  208 static struct resource_spec dwc_spec[] = {
  209         { SYS_RES_MEMORY,       0,      RF_ACTIVE },
  210         { SYS_RES_IRQ,          0,      RF_ACTIVE },
  211         { -1, 0 }
  212 };
  213 
  214 static void dwc_txfinish_locked(struct dwc_softc *sc);
  215 static void dwc_rxfinish_locked(struct dwc_softc *sc);
  216 static void dwc_stop_locked(struct dwc_softc *sc);
  217 static void dwc_setup_rxfilter(struct dwc_softc *sc);
  218 static void dwc_setup_core(struct dwc_softc *sc);
  219 static void dwc_enable_mac(struct dwc_softc *sc, bool enable);
  220 static void dwc_init_dma(struct dwc_softc *sc);
  221 static void dwc_stop_dma(struct dwc_softc *sc);
  222 
  223 static void dwc_tick(void *arg);
  224 
  225 /* Pause time field in the transmitted control frame */
  226 static int dwc_pause_time = 0xffff;
  227 TUNABLE_INT("hw.dwc.pause_time", &dwc_pause_time);
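      /*
       * dwc_pause_time is copied into the pause-time field of the
       * FLOW_CONTROL register by dwc_miibus_statchg() whenever a full-duplex
       * link comes up.  Being a tunable only (no sysctl), it has to be set
       * from the loader, e.g. with a hypothetical value in /boot/loader.conf:
       *
       *   hw.dwc.pause_time="1000"
       */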
  228 
  229 /*
  230  * MIIBUS functions
  231  */
  232 
  233 static int
  234 dwc_miibus_read_reg(device_t dev, int phy, int reg)
  235 {
  236         struct dwc_softc *sc;
  237         uint16_t mii;
  238         size_t cnt;
  239         int rv = 0;
  240 
  241         sc = device_get_softc(dev);
  242 
  243         mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT)
  244             | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT)
  245             | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT)
  246             | GMII_ADDRESS_GB; /* Busy flag */
  247 
  248         WRITE4(sc, GMII_ADDRESS, mii);
  249 
  250         for (cnt = 0; cnt < 1000; cnt++) {
  251                 if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) {
  252                         rv = READ4(sc, GMII_DATA);
  253                         break;
  254                 }
  255                 DELAY(10);
  256         }
  257 
  258         return rv;
  259 }
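      /*
       * The MDIO accessors above and below drive the GMII_ADDRESS/GMII_DATA
       * register pair: the busy bit (GB) is set along with the PHY address,
       * register number and clock-range divider, then the code polls for GB
       * to clear, giving up silently after roughly 1000 * 10us = 10ms.  A
       * read that times out returns 0 rather than an error.
       */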
  260 
  261 static int
  262 dwc_miibus_write_reg(device_t dev, int phy, int reg, int val)
  263 {
  264         struct dwc_softc *sc;
  265         uint16_t mii;
  266         size_t cnt;
  267 
  268         sc = device_get_softc(dev);
  269 
  270         mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT)
  271             | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT)
  272             | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT)
  273             | GMII_ADDRESS_GB | GMII_ADDRESS_GW;
  274 
  275         WRITE4(sc, GMII_DATA, val);
  276         WRITE4(sc, GMII_ADDRESS, mii);
  277 
  278         for (cnt = 0; cnt < 1000; cnt++) {
  279                 if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) {
  280                         break;
  281                 }
  282                 DELAY(10);
  283         }
  284 
  285         return (0);
  286 }
  287 
  288 static void
  289 dwc_miibus_statchg(device_t dev)
  290 {
  291         struct dwc_softc *sc;
  292         struct mii_data *mii;
  293         uint32_t reg;
  294 
  295         /*
  296          * Called by the MII bus driver when the PHY establishes
  297          * link to set the MAC interface registers.
  298          */
  299 
  300         sc = device_get_softc(dev);
  301 
  302         DWC_ASSERT_LOCKED(sc);
  303 
  304         mii = sc->mii_softc;
  305 
  306         if (mii->mii_media_status & IFM_ACTIVE)
  307                 sc->link_is_up = true;
  308         else
  309                 sc->link_is_up = false;
  310 
  311         reg = READ4(sc, MAC_CONFIGURATION);
  312         switch (IFM_SUBTYPE(mii->mii_media_active)) {
  313         case IFM_1000_T:
  314         case IFM_1000_SX:
  315                 reg &= ~(CONF_FES | CONF_PS);
  316                 break;
  317         case IFM_100_TX:
  318                 reg |= (CONF_FES | CONF_PS);
  319                 break;
  320         case IFM_10_T:
  321                 reg &= ~(CONF_FES);
  322                 reg |= (CONF_PS);
  323                 break;
  324         case IFM_NONE:
  325                 sc->link_is_up = false;
  326                 return;
  327         default:
  328                 sc->link_is_up = false;
  329                 device_printf(dev, "Unsupported media %u\n",
  330                     IFM_SUBTYPE(mii->mii_media_active));
  331                 return;
  332         }
  333         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
  334                 reg |= (CONF_DM);
  335         else
  336                 reg &= ~(CONF_DM);
  337         WRITE4(sc, MAC_CONFIGURATION, reg);
  338 
  339         reg = FLOW_CONTROL_UP;
  340         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
  341                 reg |= FLOW_CONTROL_TX;
  342         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
  343                 reg |= FLOW_CONTROL_RX;
  344         if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
  345                 reg |= dwc_pause_time << FLOW_CONTROL_PT_SHIFT;
  346         WRITE4(sc, FLOW_CONTROL, reg);
  347 
  348         IF_DWC_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active));
  349 
  350 }
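      /*
       * Judging by how the media types are handled above, CONF_PS selects
       * the 10/100 (MII) port and CONF_FES the 100Mbps rate within it, so
       * gigabit clears both; CONF_DM enables full duplex.  The negotiated
       * IFM_ETH_TXPAUSE/IFM_ETH_RXPAUSE options are mirrored into
       * FLOW_CONTROL, with the pause time only advertised on full-duplex
       * links, and IF_DWC_SET_SPEED() presumably lets the platform glue
       * adjust any SoC-level clocks for the new speed.
       */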
  351 
  352 /*
  353  * Media functions
  354  */
  355 
  356 static void
  357 dwc_media_status(if_t ifp, struct ifmediareq *ifmr)
  358 {
  359         struct dwc_softc *sc;
  360         struct mii_data *mii;
  361 
  362         sc = if_getsoftc(ifp);
  363         mii = sc->mii_softc;
  364         DWC_LOCK(sc);
  365         mii_pollstat(mii);
  366         ifmr->ifm_active = mii->mii_media_active;
  367         ifmr->ifm_status = mii->mii_media_status;
  368         DWC_UNLOCK(sc);
  369 }
  370 
  371 static int
  372 dwc_media_change_locked(struct dwc_softc *sc)
  373 {
  374 
  375         return (mii_mediachg(sc->mii_softc));
  376 }
  377 
  378 static int
  379 dwc_media_change(if_t ifp)
  380 {
  381         struct dwc_softc *sc;
  382         int error;
  383 
  384         sc = if_getsoftc(ifp);
  385 
  386         DWC_LOCK(sc);
  387         error = dwc_media_change_locked(sc);
  388         DWC_UNLOCK(sc);
  389         return (error);
  390 }
  391 
  392 /*
  393  * Core functions
  394  */
  395 
  396 static const uint8_t nibbletab[] = {
  397         /* 0x0 0000 -> 0000 */  0x0,
  398         /* 0x1 0001 -> 1000 */  0x8,
  399         /* 0x2 0010 -> 0100 */  0x4,
  400         /* 0x3 0011 -> 1100 */  0xc,
  401         /* 0x4 0100 -> 0010 */  0x2,
  402         /* 0x5 0101 -> 1010 */  0xa,
  403         /* 0x6 0110 -> 0110 */  0x6,
  404         /* 0x7 0111 -> 1110 */  0xe,
  405         /* 0x8 1000 -> 0001 */  0x1,
  406         /* 0x9 1001 -> 1001 */  0x9,
  407         /* 0xa 1010 -> 0101 */  0x5,
  408         /* 0xb 1011 -> 1101 */  0xd,
  409         /* 0xc 1100 -> 0011 */  0x3,
  410         /* 0xd 1101 -> 1011 */  0xb,
  411         /* 0xe 1110 -> 0111 */  0x7,
  412         /* 0xf 1111 -> 1111 */  0xf, };
  413 
  414 static uint8_t
  415 bitreverse(uint8_t x)
  416 {
  417 
  418         return (nibbletab[x & 0xf] << 4) | nibbletab[x >> 4];
  419 }
  420 
  421 static u_int
  422 dwc_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
  423 {
  424         struct dwc_hash_maddr_ctx *ctx = arg;
  425         uint32_t crc, hashbit, hashreg;
  426         uint8_t val;
  427 
  428         crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
  429         /* Take lower 8 bits and reverse it */
  430         val = bitreverse(~crc & 0xff);
  431         if (ctx->sc->mactype != DWC_GMAC_EXT_DESC)
  432                 val >>= 2; /* Only need lower 6 bits */
  433         hashreg = (val >> 5);
  434         hashbit = (val & 31);
  435         ctx->hash[hashreg] |= (1 << hashbit);
  436 
  437         return (1);
  438 }
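      /*
       * The hash bin is derived from the low byte of the inverted CRC32 of
       * the multicast address, bit-reversed.  Cores using the extended
       * descriptor format have a 256-bin table (eight 32-bit registers, all
       * 8 bits used); the older layout has a 64-bin table (HTLOW/HTHIGH),
       * which is why the value is narrowed to 6 bits in that case.
       */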
  439 
  440 static void
  441 dwc_setup_rxfilter(struct dwc_softc *sc)
  442 {
  443         struct dwc_hash_maddr_ctx ctx;
  444         if_t ifp;
  445         uint8_t *eaddr;
  446         uint32_t ffval, hi, lo;
  447         int nhash, i;
  448 
  449         DWC_ASSERT_LOCKED(sc);
  450 
  451         ifp = sc->ifp;
  452         nhash = sc->mactype != DWC_GMAC_EXT_DESC ? 2 : 8;
  453 
  454         /*
  455          * Set the multicast (group) filter hash.
  456          */
  457         if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
  458                 ffval = (FRAME_FILTER_PM);
  459                 for (i = 0; i < nhash; i++)
  460                         ctx.hash[i] = ~0;
  461         } else {
  462                 ffval = (FRAME_FILTER_HMC);
  463                 for (i = 0; i < nhash; i++)
  464                         ctx.hash[i] = 0;
  465                 ctx.sc = sc;
  466                 if_foreach_llmaddr(ifp, dwc_hash_maddr, &ctx);
  467         }
  468 
  469         /*
  470          * Set the individual address filter hash.
  471          */
  472         if ((if_getflags(ifp) & IFF_PROMISC) != 0)
  473                 ffval |= (FRAME_FILTER_PR);
  474 
  475         /*
  476          * Set the primary address.
  477          */
  478         eaddr = IF_LLADDR(ifp);
  479         lo = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) |
  480             (eaddr[3] << 24);
  481         hi = eaddr[4] | (eaddr[5] << 8);
  482         WRITE4(sc, MAC_ADDRESS_LOW(0), lo);
  483         WRITE4(sc, MAC_ADDRESS_HIGH(0), hi);
  484         WRITE4(sc, MAC_FRAME_FILTER, ffval);
  485         if (sc->mactype != DWC_GMAC_EXT_DESC) {
  486                 WRITE4(sc, GMAC_MAC_HTLOW, ctx.hash[0]);
  487                 WRITE4(sc, GMAC_MAC_HTHIGH, ctx.hash[1]);
  488         } else {
  489                 for (i = 0; i < nhash; i++)
  490                         WRITE4(sc, HASH_TABLE_REG(i), ctx.hash[i]);
  491         }
  492 }
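      /*
       * Filter policy, as programmed above: IFF_ALLMULTI sets the
       * pass-all-multicast bit and fills the hash table with ones, otherwise
       * multicast is hash-filtered using the table built by dwc_hash_maddr(),
       * and IFF_PROMISC adds the promiscuous bit on top.  The station address
       * in MAC_ADDRESS_{LOW,HIGH}(0) is stored with byte 0 in the
       * least-significant bits.
       */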
  493 
  494 static void
  495 dwc_setup_core(struct dwc_softc *sc)
  496 {
  497         uint32_t reg;
  498 
  499         DWC_ASSERT_LOCKED(sc);
  500 
  501         /* Enable core */
  502         reg = READ4(sc, MAC_CONFIGURATION);
  503         reg |= (CONF_JD | CONF_ACS | CONF_BE);
  504         WRITE4(sc, MAC_CONFIGURATION, reg);
  505 }
  506 
  507 static void
  508 dwc_enable_mac(struct dwc_softc *sc, bool enable)
  509 {
  510         uint32_t reg;
  511 
  512         DWC_ASSERT_LOCKED(sc);
  513         reg = READ4(sc, MAC_CONFIGURATION);
  514         if (enable)
  515                 reg |= CONF_TE | CONF_RE;
  516         else
  517                 reg &= ~(CONF_TE | CONF_RE);
  518         WRITE4(sc, MAC_CONFIGURATION, reg);
  519 }
  520 
  521 static void
  522 dwc_enable_csum_offload(struct dwc_softc *sc)
  523 {
  524         uint32_t reg;
  525 
  526         DWC_ASSERT_LOCKED(sc);
  527         reg = READ4(sc, MAC_CONFIGURATION);
  528         if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0)
  529                 reg |= CONF_IPC;
  530         else
  531                 reg &= ~CONF_IPC;
  532         WRITE4(sc, MAC_CONFIGURATION, reg);
  533 }
  534 
  535 static void
  536 dwc_get_hwaddr(struct dwc_softc *sc, uint8_t *hwaddr)
  537 {
  538         uint32_t hi, lo, rnd;
  539 
  540         /*
  541          * Try to recover a MAC address from the running hardware. If there's
  542          * something non-zero there, assume the bootloader did the right thing
  543          * and just use it.
  544          *
  545          * Otherwise, set the address to a convenient locally assigned address,
  546          * 'bsd' + random 24 low-order bits.  'b' is 0x62, which has the locally
  547          * assigned bit set, and the broadcast/multicast bit clear.
  548          */
  549         lo = READ4(sc, MAC_ADDRESS_LOW(0));
  550         hi = READ4(sc, MAC_ADDRESS_HIGH(0)) & 0xffff;
  551         if ((lo != 0xffffffff) || (hi != 0xffff)) {
  552                 hwaddr[0] = (lo >>  0) & 0xff;
  553                 hwaddr[1] = (lo >>  8) & 0xff;
  554                 hwaddr[2] = (lo >> 16) & 0xff;
  555                 hwaddr[3] = (lo >> 24) & 0xff;
  556                 hwaddr[4] = (hi >>  0) & 0xff;
  557                 hwaddr[5] = (hi >>  8) & 0xff;
  558         } else {
  559                 rnd = arc4random() & 0x00ffffff;
  560                 hwaddr[0] = 'b';
  561                 hwaddr[1] = 's';
  562                 hwaddr[2] = 'd';
  563                 hwaddr[3] = rnd >> 16;
  564                 hwaddr[4] = rnd >>  8;
  565                 hwaddr[5] = rnd >>  0;
  566         }
  567 }
  568 
  569 /*
  570  * DMA functions
  571  */
  572 
  573 static void
  574 dwc_init_dma(struct dwc_softc *sc)
  575 {
  576         uint32_t reg;
  577 
  578         DWC_ASSERT_LOCKED(sc);
  579 
   580         /* Initialize DMA and enable transmitters */
  581         reg = READ4(sc, OPERATION_MODE);
  582         reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
  583         reg &= ~(MODE_RSF);
  584         reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
  585         WRITE4(sc, OPERATION_MODE, reg);
  586 
  587         WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);
  588 
  589         /* Start DMA */
  590         reg = READ4(sc, OPERATION_MODE);
  591         reg |= (MODE_ST | MODE_SR);
  592         WRITE4(sc, OPERATION_MODE, reg);
  593 }
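      /*
       * Going by the OPERATION_MODE bit names, this selects store-and-forward
       * transmit (MODE_TSF) with "operate on second frame" (MODE_OSF), and
       * threshold (cut-through) receive with a 32-byte trigger (MODE_RSF
       * cleared, MODE_RTC_LEV32) while still forwarding undersized good
       * frames (MODE_FUF).  MODE_ST/MODE_SR then start the transmit and
       * receive DMA engines.
       */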
  594 
  595 static void
  596 dwc_stop_dma(struct dwc_softc *sc)
  597 {
  598         uint32_t reg;
  599 
  600         DWC_ASSERT_LOCKED(sc);
  601 
  602         /* Stop DMA TX */
  603         reg = READ4(sc, OPERATION_MODE);
  604         reg &= ~(MODE_ST);
  605         WRITE4(sc, OPERATION_MODE, reg);
  606 
  607         /* Flush TX */
  608         reg = READ4(sc, OPERATION_MODE);
  609         reg |= (MODE_FTF);
  610         WRITE4(sc, OPERATION_MODE, reg);
  611 
  612         /* Stop DMA RX */
  613         reg = READ4(sc, OPERATION_MODE);
  614         reg &= ~(MODE_SR);
  615         WRITE4(sc, OPERATION_MODE, reg);
  616 }
  617 
  618 static inline uint32_t
  619 next_rxidx(struct dwc_softc *sc, uint32_t curidx)
  620 {
  621 
  622         return ((curidx + 1) % RX_DESC_COUNT);
  623 }
  624 
  625 static inline uint32_t
  626 next_txidx(struct dwc_softc *sc, uint32_t curidx)
  627 {
  628 
  629         return ((curidx + 1) % TX_DESC_COUNT);
  630 }
  631 
  632 static void
  633 dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  634 {
  635 
  636         if (error != 0)
  637                 return;
  638         *(bus_addr_t *)arg = segs[0].ds_addr;
  639 }
  640 
  641 inline static void
  642 dwc_setup_txdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr,
  643   uint32_t len, uint32_t flags, bool first, bool last)
  644 {
  645         uint32_t desc0, desc1;
  646 
  647         /* Addr/len 0 means we're clearing the descriptor after xmit done. */
  648         if (paddr == 0 || len == 0) {
  649                 desc0 = 0;
  650                 desc1 = 0;
  651                 --sc->tx_desccount;
  652         } else {
  653                 if (sc->mactype != DWC_GMAC_EXT_DESC) {
  654                         desc0 = 0;
  655                         desc1 = NTDESC1_TCH | len | flags;
  656                         if (first)
  657                                 desc1 |=  NTDESC1_FS;
  658                         if (last)
  659                                 desc1 |= NTDESC1_LS | NTDESC1_IC;
  660                 } else {
  661                         desc0 = ETDESC0_TCH | flags;
  662                         if (first)
  663                                 desc0 |= ETDESC0_FS;
  664                         if (last)
  665                                 desc0 |= ETDESC0_LS | ETDESC0_IC;
  666                         desc1 = len;
  667                 }
  668                 ++sc->tx_desccount;
  669         }
  670 
  671         sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
  672         sc->txdesc_ring[idx].desc0 = desc0;
  673         sc->txdesc_ring[idx].desc1 = desc1;
  674 }
  675 
  676 inline static void
  677 dwc_set_owner(struct dwc_softc *sc, int idx)
  678 {
  679         wmb();
  680         sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
  681         wmb();
  682 }
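      /*
       * Ownership handoff: the barrier before setting TDESC0_OWN makes the
       * rest of the descriptor visible to the DMA engine first, and the one
       * after it posts OWN before the caller writes TRANSMIT_POLL_DEMAND.
       * For multi-segment packets dwc_setup_txbuf() flips OWN on the first
       * descriptor last, so the engine never sees a half-built chain.
       */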
  683 
  684 static int
  685 dwc_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
  686 {
  687         struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
  688         int error, nsegs;
  689         struct mbuf * m;
  690         uint32_t flags = 0;
  691         int i;
  692         int first, last;
  693 
  694         error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
  695             *mp, segs, &nsegs, 0);
  696         if (error == EFBIG) {
  697                 /*
  698                  * The map may be partially mapped from the first call.
  699                  * Make sure to reset it.
  700                  */
  701                 bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
  702                 if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
  703                         return (ENOMEM);
  704                 *mp = m;
  705                 error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
  706                     *mp, segs, &nsegs, 0);
  707         }
  708         if (error != 0)
  709                 return (ENOMEM);
  710 
  711         if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
  712                 bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
  713                 return (ENOMEM);
  714         }
  715 
  716         m = *mp;
  717 
  718         if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
  719                 if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
  720                         if (sc->mactype != DWC_GMAC_EXT_DESC)
  721                                 flags = NTDESC1_CIC_FULL;
  722                         else
  723                                 flags = ETDESC0_CIC_FULL;
  724                 } else {
  725                         if (sc->mactype != DWC_GMAC_EXT_DESC)
  726                                 flags = NTDESC1_CIC_HDR;
  727                         else
  728                                 flags = ETDESC0_CIC_HDR;
  729                 }
  730         }
  731 
  732         bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
  733             BUS_DMASYNC_PREWRITE);
  734 
  735         sc->txbuf_map[idx].mbuf = m;
  736 
  737         first = sc->tx_desc_head;
  738         for (i = 0; i < nsegs; i++) {
  739                 dwc_setup_txdesc(sc, sc->tx_desc_head,
  740                     segs[i].ds_addr, segs[i].ds_len,
  741                     (i == 0) ? flags : 0, /* only first desc needs flags */
  742                     (i == 0),
  743                     (i == nsegs - 1));
  744                 if (i > 0)
  745                         dwc_set_owner(sc, sc->tx_desc_head);
  746                 last = sc->tx_desc_head;
  747                 sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
  748         }
  749 
  750         sc->txbuf_map[idx].last_desc_idx = last;
  751 
  752         dwc_set_owner(sc, first);
  753 
  754         return (0);
  755 }
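      /*
       * Checksum insertion: the CIC_FULL encoding requests IP header plus
       * TCP/UDP pseudo-header and payload checksums, CIC_HDR the IP header
       * only, which is how the CSUM_IP/CSUM_TCP/CSUM_UDP request flags are
       * mapped above.  Only the first descriptor of a packet carries the
       * checksum flags.
       */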
  756 
  757 inline static uint32_t
  758 dwc_setup_rxdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr)
  759 {
  760         uint32_t nidx;
  761 
  762         sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
  763         nidx = next_rxidx(sc, idx);
  764         sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
  765             (nidx * sizeof(struct dwc_hwdesc));
  766         if (sc->mactype != DWC_GMAC_EXT_DESC)
  767                 sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
  768                     MIN(MCLBYTES, NRDESC1_RBS1_MASK);
  769         else
  770                 sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
  771                     MIN(MCLBYTES, ERDESC1_RBS1_MASK);
  772 
  773         wmb();
  774         sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
  775         wmb();
  776         return (nidx);
  777 }
  778 
  779 static int
  780 dwc_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
  781 {
  782         struct bus_dma_segment seg;
  783         int error, nsegs;
  784 
  785         m_adj(m, ETHER_ALIGN);
  786 
  787         error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
  788             m, &seg, &nsegs, 0);
  789         if (error != 0)
  790                 return (error);
  791 
  792         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
  793 
  794         bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
  795             BUS_DMASYNC_PREREAD);
  796 
  797         sc->rxbuf_map[idx].mbuf = m;
  798         dwc_setup_rxdesc(sc, idx, seg.ds_addr);
  799 
  800         return (0);
  801 }
  802 
  803 static struct mbuf *
  804 dwc_alloc_mbufcl(struct dwc_softc *sc)
  805 {
  806         struct mbuf *m;
  807 
  808         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
  809         if (m != NULL)
  810                 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
  811 
  812         return (m);
  813 }
  814 
  815 static struct mbuf *
  816 dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
  817     struct dwc_bufmap *map)
  818 {
  819         if_t ifp;
  820         struct mbuf *m, *m0;
  821         int len;
  822         uint32_t rdesc0;
  823 
  824         m = map->mbuf;
  825         ifp = sc->ifp;
  826         rdesc0 = desc->desc0;
  827 
  828         if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
  829                     (RDESC0_FS | RDESC0_LS)) {
  830                 /*
  831                  * Something has gone very wrong: the whole packet should
  832                  * have been received in a single descriptor.  Report the problem.
  833                  */
  834                 device_printf(sc->dev,
  835                     "%s: RX descriptor without FIRST and LAST bit set: 0x%08X\n",
  836                     __func__, rdesc0);
  837                 return (NULL);
  838         }
  839 
  840         len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
  841         if (len < 64) {
  842                 /*
  843                  * Length is invalid; recycle the old mbuf.
  844                  * This is probably an impossible case.
  845                  */
  846                 return (NULL);
  847         }
  848 
  849         /* Allocate new buffer */
  850         m0 = dwc_alloc_mbufcl(sc);
  851         if (m0 == NULL) {
  852                 /* no new mbuf available, recycle old */
  853                 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
  854                 return (NULL);
  855         }
  856         /* Do dmasync for newly received packet */
  857         bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
  858         bus_dmamap_unload(sc->rxbuf_tag, map->map);
  859 
  860         /* Received packet is valid, process it */
  861         m->m_pkthdr.rcvif = ifp;
  862         m->m_pkthdr.len = len;
  863         m->m_len = len;
  864         if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
  865 
  866         if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
  867           (rdesc0 & RDESC0_FT) != 0) {
  868                 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
  869                 if ((rdesc0 & RDESC0_ICE) == 0)
  870                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
  871                 if ((rdesc0 & RDESC0_PCE) == 0) {
  872                         m->m_pkthdr.csum_flags |=
  873                                 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
  874                         m->m_pkthdr.csum_data = 0xffff;
  875                 }
  876         }
  877 
  878         /* Remove trailing FCS */
  879         m_adj(m, -ETHER_CRC_LEN);
  880 
  881         DWC_UNLOCK(sc);
  882         if_input(ifp, m);
  883         DWC_LOCK(sc);
  884         return (m0);
  885 }
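      /*
       * RX buffers are full mbuf clusters, so a complete frame is expected to
       * land in a single descriptor and an FS/LS mismatch is treated as a
       * hard error.  If no replacement cluster can be allocated, the old one
       * is recycled and the frame dropped (counted as IQDROPS), keeping the
       * ring fully populated.  Checksum status is only trusted when RXCSUM is
       * enabled and the MAC recognized the frame type (RDESC0_FT).
       */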
  886 
  887 static int
  888 setup_dma(struct dwc_softc *sc)
  889 {
  890         struct mbuf *m;
  891         int error;
  892         int nidx;
  893         int idx;
  894 
  895         /*
  896          * Set up TX descriptor ring, descriptors, and dma maps.
  897          */
  898         error = bus_dma_tag_create(
  899             bus_get_dma_tag(sc->dev),   /* Parent tag. */
  900             DWC_DESC_RING_ALIGN, 0,     /* alignment, boundary */
  901             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
  902             BUS_SPACE_MAXADDR,          /* highaddr */
  903             NULL, NULL,                 /* filter, filterarg */
  904             TX_DESC_SIZE, 1,            /* maxsize, nsegments */
  905             TX_DESC_SIZE,               /* maxsegsize */
  906             0,                          /* flags */
  907             NULL, NULL,                 /* lockfunc, lockarg */
  908             &sc->txdesc_tag);
  909         if (error != 0) {
  910                 device_printf(sc->dev,
  911                     "could not create TX ring DMA tag.\n");
  912                 goto out;
  913         }
  914 
  915         error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
  916             BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
  917             &sc->txdesc_map);
  918         if (error != 0) {
  919                 device_printf(sc->dev,
  920                     "could not allocate TX descriptor ring.\n");
  921                 goto out;
  922         }
  923 
  924         error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
  925             sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
  926             &sc->txdesc_ring_paddr, 0);
  927         if (error != 0) {
  928                 device_printf(sc->dev,
  929                     "could not load TX descriptor ring map.\n");
  930                 goto out;
  931         }
  932 
  933         for (idx = 0; idx < TX_DESC_COUNT; idx++) {
  934                 nidx = next_txidx(sc, idx);
  935                 sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
  936                     (nidx * sizeof(struct dwc_hwdesc));
  937         }
  938 
  939         error = bus_dma_tag_create(
  940             bus_get_dma_tag(sc->dev),   /* Parent tag. */
  941             1, 0,                       /* alignment, boundary */
  942             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
  943             BUS_SPACE_MAXADDR,          /* highaddr */
  944             NULL, NULL,                 /* filter, filterarg */
  945             MCLBYTES*TX_MAP_MAX_SEGS,   /* maxsize */
  946             TX_MAP_MAX_SEGS,            /* nsegments */
  947             MCLBYTES,                   /* maxsegsize */
  948             0,                          /* flags */
  949             NULL, NULL,                 /* lockfunc, lockarg */
  950             &sc->txbuf_tag);
  951         if (error != 0) {
  952                 device_printf(sc->dev,
  953                     "could not create TX buffer DMA tag.\n");
  954                 goto out;
  955         }
  956 
  957         for (idx = 0; idx < TX_MAP_COUNT; idx++) {
  958                 error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
  959                     &sc->txbuf_map[idx].map);
  960                 if (error != 0) {
  961                         device_printf(sc->dev,
  962                             "could not create TX buffer DMA map.\n");
  963                         goto out;
  964                 }
  965         }
  966 
  967         for (idx = 0; idx < TX_DESC_COUNT; idx++)
  968                 dwc_setup_txdesc(sc, idx, 0, 0, 0, false, false);
  969 
  970         /*
  971          * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
  972          */
  973         error = bus_dma_tag_create(
  974             bus_get_dma_tag(sc->dev),   /* Parent tag. */
  975             DWC_DESC_RING_ALIGN, 0,     /* alignment, boundary */
  976             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
  977             BUS_SPACE_MAXADDR,          /* highaddr */
  978             NULL, NULL,                 /* filter, filterarg */
  979             RX_DESC_SIZE, 1,            /* maxsize, nsegments */
  980             RX_DESC_SIZE,               /* maxsegsize */
  981             0,                          /* flags */
  982             NULL, NULL,                 /* lockfunc, lockarg */
  983             &sc->rxdesc_tag);
  984         if (error != 0) {
  985                 device_printf(sc->dev,
  986                     "could not create RX ring DMA tag.\n");
  987                 goto out;
  988         }
  989 
  990         error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
  991             BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
  992             &sc->rxdesc_map);
  993         if (error != 0) {
  994                 device_printf(sc->dev,
  995                     "could not allocate RX descriptor ring.\n");
  996                 goto out;
  997         }
  998 
  999         error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
 1000             sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
 1001             &sc->rxdesc_ring_paddr, 0);
 1002         if (error != 0) {
 1003                 device_printf(sc->dev,
 1004                     "could not load RX descriptor ring map.\n");
 1005                 goto out;
 1006         }
 1007 
 1008         error = bus_dma_tag_create(
 1009             bus_get_dma_tag(sc->dev),   /* Parent tag. */
 1010             1, 0,                       /* alignment, boundary */
 1011             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
 1012             BUS_SPACE_MAXADDR,          /* highaddr */
 1013             NULL, NULL,                 /* filter, filterarg */
 1014             MCLBYTES, 1,                /* maxsize, nsegments */
 1015             MCLBYTES,                   /* maxsegsize */
 1016             0,                          /* flags */
 1017             NULL, NULL,                 /* lockfunc, lockarg */
 1018             &sc->rxbuf_tag);
 1019         if (error != 0) {
 1020                 device_printf(sc->dev,
 1021                     "could not create RX buf DMA tag.\n");
 1022                 goto out;
 1023         }
 1024 
 1025         for (idx = 0; idx < RX_DESC_COUNT; idx++) {
 1026                 error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
 1027                     &sc->rxbuf_map[idx].map);
 1028                 if (error != 0) {
 1029                         device_printf(sc->dev,
 1030                             "could not create RX buffer DMA map.\n");
 1031                         goto out;
 1032                 }
 1033                 if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
 1034                         device_printf(sc->dev, "Could not alloc mbuf\n");
 1035                         error = ENOMEM;
 1036                         goto out;
 1037                 }
 1038                 if ((error = dwc_setup_rxbuf(sc, idx, m)) != 0) {
 1039                         device_printf(sc->dev,
 1040                             "could not create new RX buffer.\n");
 1041                         goto out;
 1042                 }
 1043         }
 1044 
 1045 out:
 1046         if (error != 0)
 1047                 return (ENXIO);
 1048 
 1049         return (0);
 1050 }
 1051 
 1052 static void
 1053 free_dma(struct dwc_softc *sc)
 1054 {
 1055         bus_dmamap_t map;
 1056         int idx;
 1057 
 1058         /* Clean up RX DMA resources and free mbufs. */
 1059         for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
 1060                 if ((map = sc->rxbuf_map[idx].map) != NULL) {
 1061                         bus_dmamap_unload(sc->rxbuf_tag, map);
 1062                         bus_dmamap_destroy(sc->rxbuf_tag, map);
 1063                         m_freem(sc->rxbuf_map[idx].mbuf);
 1064                 }
 1065         }
 1066         if (sc->rxbuf_tag != NULL)
 1067                 bus_dma_tag_destroy(sc->rxbuf_tag);
 1068         if (sc->rxdesc_map != NULL) {
 1069                 bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
 1070                 bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
 1071                     sc->rxdesc_map);
 1072         }
 1073         if (sc->rxdesc_tag != NULL)
 1074                 bus_dma_tag_destroy(sc->rxdesc_tag);
 1075 
 1076         /* Clean up TX DMA resources. */
 1077         for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
 1078                 if ((map = sc->txbuf_map[idx].map) != NULL) {
 1079                         /* TX maps are already unloaded. */
 1080                         bus_dmamap_destroy(sc->txbuf_tag, map);
 1081                 }
 1082         }
 1083         if (sc->txbuf_tag != NULL)
 1084                 bus_dma_tag_destroy(sc->txbuf_tag);
 1085         if (sc->txdesc_map != NULL) {
 1086                 bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
 1087                 bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
 1088                     sc->txdesc_map);
 1089         }
 1090         if (sc->txdesc_tag != NULL)
 1091                 bus_dma_tag_destroy(sc->txdesc_tag);
 1092 }
 1093 
 1094 /*
 1095  * if_ functions
 1096  */
 1097 
 1098 static void
 1099 dwc_txstart_locked(struct dwc_softc *sc)
 1100 {
 1101         if_t ifp;
 1102         struct mbuf *m;
 1103         int enqueued;
 1104 
 1105         DWC_ASSERT_LOCKED(sc);
 1106 
 1107         if (!sc->link_is_up)
 1108                 return;
 1109 
 1110         ifp = sc->ifp;
 1111 
 1112         if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
 1113             IFF_DRV_RUNNING)
 1114                 return;
 1115 
 1116         enqueued = 0;
 1117 
 1118         for (;;) {
 1119                 if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS  + 1)) {
 1120                         if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
 1121                         break;
 1122                 }
 1123 
 1124                 if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
 1125                         if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
 1126                         break;
 1127                 }
 1128 
 1129                 m = if_dequeue(ifp);
 1130                 if (m == NULL)
 1131                         break;
 1132                 if (dwc_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
 1133                         if_sendq_prepend(ifp, m);
 1134                         if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
 1135                         break;
 1136                 }
 1137                 if_bpfmtap(ifp, m);
 1138                 sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
 1139                 sc->tx_mapcount++;
 1140                 ++enqueued;
 1141         }
 1142 
 1143         if (enqueued != 0) {
 1144                 WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
 1145                 sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
 1146         }
 1147 }
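      /*
       * The ring is treated as full when fewer than TX_MAP_MAX_SEGS
       * descriptors (or no map slots) remain; IFF_DRV_OACTIVE is then set and
       * cleared again from dwc_txfinish_locked().  Writing any value to
       * TRANSMIT_POLL_DEMAND makes the DMA engine re-scan the descriptor list
       * in case it had suspended on a descriptor it did not yet own.
       */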
 1148 
 1149 static void
 1150 dwc_txstart(if_t ifp)
 1151 {
 1152         struct dwc_softc *sc = if_getsoftc(ifp);
 1153 
 1154         DWC_LOCK(sc);
 1155         dwc_txstart_locked(sc);
 1156         DWC_UNLOCK(sc);
 1157 }
 1158 
 1159 static void
 1160 dwc_init_locked(struct dwc_softc *sc)
 1161 {
 1162         if_t ifp = sc->ifp;
 1163 
 1164         DWC_ASSERT_LOCKED(sc);
 1165 
 1166         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
 1167                 return;
 1168 
 1169         /*
 1170          * Call mii_mediachg() which will call back into dwc_miibus_statchg()
 1171          * to set up the remaining config registers based on current media.
 1172          */
 1173         mii_mediachg(sc->mii_softc);
 1174 
 1175         dwc_setup_rxfilter(sc);
 1176         dwc_setup_core(sc);
 1177         dwc_enable_mac(sc, true);
 1178         dwc_enable_csum_offload(sc);
 1179         dwc_init_dma(sc);
 1180 
 1181         if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
 1182 
 1183         callout_reset(&sc->dwc_callout, hz, dwc_tick, sc);
 1184 }
 1185 
 1186 static void
 1187 dwc_init(void *if_softc)
 1188 {
 1189         struct dwc_softc *sc = if_softc;
 1190 
 1191         DWC_LOCK(sc);
 1192         dwc_init_locked(sc);
 1193         DWC_UNLOCK(sc);
 1194 }
 1195 
 1196 static void
 1197 dwc_stop_locked(struct dwc_softc *sc)
 1198 {
 1199         if_t ifp;
 1200 
 1201         DWC_ASSERT_LOCKED(sc);
 1202 
 1203         ifp = sc->ifp;
 1204         if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 1205         sc->tx_watchdog_count = 0;
 1206         sc->stats_harvest_count = 0;
 1207 
 1208         callout_stop(&sc->dwc_callout);
 1209 
 1210         dwc_stop_dma(sc);
 1211         dwc_enable_mac(sc, false);
 1212 }
 1213 
 1214 static int
 1215 dwc_ioctl(if_t ifp, u_long cmd, caddr_t data)
 1216 {
 1217         struct dwc_softc *sc;
 1218         struct mii_data *mii;
 1219         struct ifreq *ifr;
 1220         int flags, mask, error;
 1221 
 1222         sc = if_getsoftc(ifp);
 1223         ifr = (struct ifreq *)data;
 1224 
 1225         error = 0;
 1226         switch (cmd) {
 1227         case SIOCSIFFLAGS:
 1228                 DWC_LOCK(sc);
 1229                 if (if_getflags(ifp) & IFF_UP) {
 1230                         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 1231                                 flags = if_getflags(ifp) ^ sc->if_flags;
 1232                                 if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
 1233                                         dwc_setup_rxfilter(sc);
 1234                         } else {
 1235                                 if (!sc->is_detaching)
 1236                                         dwc_init_locked(sc);
 1237                         }
 1238                 } else {
 1239                         if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
 1240                                 dwc_stop_locked(sc);
 1241                 }
 1242                 sc->if_flags = if_getflags(ifp);
 1243                 DWC_UNLOCK(sc);
 1244                 break;
 1245         case SIOCADDMULTI:
 1246         case SIOCDELMULTI:
 1247                 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 1248                         DWC_LOCK(sc);
 1249                         dwc_setup_rxfilter(sc);
 1250                         DWC_UNLOCK(sc);
 1251                 }
 1252                 break;
 1253         case SIOCSIFMEDIA:
 1254         case SIOCGIFMEDIA:
 1255                 mii = sc->mii_softc;
 1256                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1257                 break;
 1258         case SIOCSIFCAP:
 1259                 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
 1260                 if (mask & IFCAP_VLAN_MTU) {
 1261                         /* No work to do except acknowledge the change took */
 1262                         if_togglecapenable(ifp, IFCAP_VLAN_MTU);
 1263                 }
 1264                 if (mask & IFCAP_RXCSUM)
 1265                         if_togglecapenable(ifp, IFCAP_RXCSUM);
 1266                 if (mask & IFCAP_TXCSUM)
 1267                         if_togglecapenable(ifp, IFCAP_TXCSUM);
 1268                 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
 1269                         if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
 1270                 else
 1271                         if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP);
 1272 
 1273                 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 1274                         DWC_LOCK(sc);
 1275                         dwc_enable_csum_offload(sc);
 1276                         DWC_UNLOCK(sc);
 1277                 }
 1278                 break;
 1279 
 1280         default:
 1281                 error = ether_ioctl(ifp, cmd, data);
 1282                 break;
 1283         }
 1284 
 1285         return (error);
 1286 }
 1287 
 1288 /*
 1289  * Interrupts functions
 1290  */
 1291 
 1292 static void
 1293 dwc_txfinish_locked(struct dwc_softc *sc)
 1294 {
 1295         struct dwc_bufmap *bmap;
 1296         struct dwc_hwdesc *desc;
 1297         if_t ifp;
 1298         int idx, last_idx;
 1299         bool map_finished;
 1300 
 1301         DWC_ASSERT_LOCKED(sc);
 1302 
 1303         ifp = sc->ifp;
 1304         /* check if all descriptors of the map are done */
 1305         while (sc->tx_map_tail != sc->tx_map_head) {
 1306                 map_finished = true;
 1307                 bmap = &sc->txbuf_map[sc->tx_map_tail];
 1308                 idx = sc->tx_desc_tail;
 1309                 last_idx = next_txidx(sc, bmap->last_desc_idx);
 1310                 while (idx != last_idx) {
 1311                         desc = &sc->txdesc_ring[idx];
 1312                         if ((desc->desc0 & TDESC0_OWN) != 0) {
 1313                                 map_finished = false;
 1314                                 break;
 1315                         }
 1316                         idx = next_txidx(sc, idx);
 1317                 }
 1318 
 1319                 if (!map_finished)
 1320                         break;
 1321                 bus_dmamap_sync(sc->txbuf_tag, bmap->map,
 1322                     BUS_DMASYNC_POSTWRITE);
 1323                 bus_dmamap_unload(sc->txbuf_tag, bmap->map);
 1324                 m_freem(bmap->mbuf);
 1325                 bmap->mbuf = NULL;
 1326                 sc->tx_mapcount--;
 1327                 while (sc->tx_desc_tail != last_idx) {
 1328                         dwc_setup_txdesc(sc, sc->tx_desc_tail, 0, 0, 0, false, false);
 1329                         sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
 1330                 }
 1331                 sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
 1332                 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
 1333                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 1334         }
 1335 
 1336         /* If there are no buffers outstanding, muzzle the watchdog. */
 1337         if (sc->tx_desc_tail == sc->tx_desc_head) {
 1338                 sc->tx_watchdog_count = 0;
 1339         }
 1340 }
 1341 
 1342 static void
 1343 dwc_rxfinish_locked(struct dwc_softc *sc)
 1344 {
 1345         struct mbuf *m;
 1346         int error, idx;
 1347         struct dwc_hwdesc *desc;
 1348 
 1349         DWC_ASSERT_LOCKED(sc);
 1350         for (;;) {
 1351                 idx = sc->rx_idx;
 1352                 desc = sc->rxdesc_ring + idx;
 1353                 if ((desc->desc0 & RDESC0_OWN) != 0)
 1354                         break;
 1355 
 1356                 m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
 1357                 if (m == NULL) {
 1358                         wmb();
 1359                         desc->desc0 = RDESC0_OWN;
 1360                         wmb();
 1361                 } else {
 1362                         /* We cannot create a hole in the RX ring */
 1363                         error = dwc_setup_rxbuf(sc, idx, m);
 1364                         if (error != 0)
 1365                                 panic("dwc_setup_rxbuf failed:  error %d\n",
 1366                                     error);
 1367 
 1368                 }
 1369                 sc->rx_idx = next_rxidx(sc, sc->rx_idx);
 1370         }
 1371 }
 1372 
 1373 static void
 1374 dwc_intr(void *arg)
 1375 {
 1376         struct dwc_softc *sc;
 1377         uint32_t reg;
 1378 
 1379         sc = arg;
 1380 
 1381         DWC_LOCK(sc);
 1382 
 1383         reg = READ4(sc, INTERRUPT_STATUS);
 1384         if (reg)
 1385                 READ4(sc, SGMII_RGMII_SMII_CTRL_STATUS);
 1386 
 1387         reg = READ4(sc, DMA_STATUS);
 1388         if (reg & DMA_STATUS_NIS) {
 1389                 if (reg & DMA_STATUS_RI)
 1390                         dwc_rxfinish_locked(sc);
 1391 
 1392                 if (reg & DMA_STATUS_TI) {
 1393                         dwc_txfinish_locked(sc);
 1394                         dwc_txstart_locked(sc);
 1395                 }
 1396         }
 1397 
 1398         if (reg & DMA_STATUS_AIS) {
 1399                 if (reg & DMA_STATUS_FBI) {
 1400                         /* Fatal bus error */
 1401                         device_printf(sc->dev,
 1402                             "Ethernet DMA error, restarting controller.\n");
 1403                         dwc_stop_locked(sc);
 1404                         dwc_init_locked(sc);
 1405                 }
 1406         }
 1407 
 1408         WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
 1409         DWC_UNLOCK(sc);
 1410 }
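      /*
       * DMA_STATUS is write-one-to-clear, which is what the final WRITE4()
       * does for whatever was latched.  RI/TI are summarized by the normal
       * interrupt bit (NIS) and fatal bus errors by the abnormal one (AIS).
       * The read of SGMII_RGMII_SMII_CTRL_STATUS when INTERRUPT_STATUS is
       * non-zero appears to be there only to acknowledge the MAC-level
       * link-status interrupt.
       */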
 1411 
 1412 /*
 1413  * Stats
 1414  */
 1415 
 1416 static void dwc_clear_stats(struct dwc_softc *sc)
 1417 {
 1418         uint32_t reg;
 1419 
 1420         reg = READ4(sc, MMC_CONTROL);
 1421         reg |= (MMC_CONTROL_CNTRST);
 1422         WRITE4(sc, MMC_CONTROL, reg);
 1423 }
 1424 
 1425 static void
 1426 dwc_harvest_stats(struct dwc_softc *sc)
 1427 {
 1428         if_t ifp;
 1429 
 1430         /* We don't need to harvest too often. */
 1431         if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL)
 1432                 return;
 1433 
 1434         sc->stats_harvest_count = 0;
 1435         ifp = sc->ifp;
 1436 
 1437         if_inc_counter(ifp, IFCOUNTER_IERRORS,
 1438             READ4(sc, RXOVERSIZE_G) + READ4(sc, RXUNDERSIZE_G) +
 1439             READ4(sc, RXCRCERROR) + READ4(sc, RXALIGNMENTERROR) +
 1440             READ4(sc, RXRUNTERROR) + READ4(sc, RXJABBERERROR) +
 1441             READ4(sc, RXLENGTHERROR));
 1442 
 1443         if_inc_counter(ifp, IFCOUNTER_OERRORS,
 1444             READ4(sc, TXOVERSIZE_G) + READ4(sc, TXEXCESSDEF) +
 1445             READ4(sc, TXCARRIERERR) + READ4(sc, TXUNDERFLOWERROR));
 1446 
 1447         if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
 1448             READ4(sc, TXEXESSCOL) + READ4(sc, TXLATECOL));
 1449 
 1450         dwc_clear_stats(sc);
 1451 }
 1452 
 1453 static void
 1454 dwc_tick(void *arg)
 1455 {
 1456         struct dwc_softc *sc;
 1457         if_t ifp;
 1458         int link_was_up;
 1459 
 1460         sc = arg;
 1461 
 1462         DWC_ASSERT_LOCKED(sc);
 1463 
 1464         ifp = sc->ifp;
 1465 
 1466         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
 1467             return;
 1468 
 1469         /*
 1470          * Typical tx watchdog.  If this fires it indicates that we enqueued
 1471          * packets for output and never got a txdone interrupt for them.  Maybe
 1472          * it's a missed interrupt somehow; just pretend we got one.
 1473          */
 1474         if (sc->tx_watchdog_count > 0) {
 1475                 if (--sc->tx_watchdog_count == 0) {
 1476                         dwc_txfinish_locked(sc);
 1477                 }
 1478         }
 1479 
 1480         /* Gather stats from hardware counters. */
 1481         dwc_harvest_stats(sc);
 1482 
 1483         /* Check the media status. */
 1484         link_was_up = sc->link_is_up;
 1485         mii_tick(sc->mii_softc);
 1486         if (sc->link_is_up && !link_was_up)
 1487                 dwc_txstart_locked(sc);
 1488 
 1489         /* Schedule another check one second from now. */
 1490         callout_reset(&sc->dwc_callout, hz, dwc_tick, sc);
 1491 }
 1492 
 1493 /*
 1494  * Probe/Attach functions
 1495  */
 1496 
 1497 #define GPIO_ACTIVE_LOW 1
 1498 
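      /*
       * Reset the PHY using the GPIO named by the "snps,reset-gpio" property,
       * toggled through the three delays given by "snps,reset-delays-us".
       * When no reset GPIO is described, this is a no-op that returns success.
       */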
 1499 static int
 1500 dwc_reset(device_t dev)
 1501 {
 1502         pcell_t gpio_prop[4];
 1503         pcell_t delay_prop[3];
 1504         phandle_t node, gpio_node;
 1505         device_t gpio;
 1506         uint32_t pin, flags;
 1507         uint32_t pin_value;
 1508 
 1509         node = ofw_bus_get_node(dev);
 1510         if (OF_getencprop(node, "snps,reset-gpio",
 1511             gpio_prop, sizeof(gpio_prop)) <= 0)
 1512                 return (0);
 1513 
 1514         if (OF_getencprop(node, "snps,reset-delays-us",
 1515             delay_prop, sizeof(delay_prop)) <= 0) {
 1516                 device_printf(dev,
 1517                     "Invalid snps,reset-delays-us property\n");
 1518                 return (ENXIO);
 1519         }
 1520 
 1521         gpio_node = OF_node_from_xref(gpio_prop[0]);
 1522         if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) {
 1523                 device_printf(dev,
 1524                     "Can't find gpio controller for phy reset\n");
 1525                 return (ENXIO);
 1526         }
 1527 
 1528         if (GPIO_MAP_GPIOS(gpio, node, gpio_node,
 1529             nitems(gpio_prop) - 1,
 1530             gpio_prop + 1, &pin, &flags) != 0) {
 1531                 device_printf(dev, "Can't map gpio for phy reset\n");
 1532                 return (ENXIO);
 1533         }
 1534 
 1535         pin_value = GPIO_PIN_LOW;
 1536         if (OF_hasprop(node, "snps,reset-active-low"))
 1537                 pin_value = GPIO_PIN_HIGH;
 1538 
 1539         GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT);
 1540         GPIO_PIN_SET(gpio, pin, pin_value);
 1541         DELAY(delay_prop[0] * 5);
 1542         GPIO_PIN_SET(gpio, pin, !pin_value);
 1543         DELAY(delay_prop[1] * 5);
 1544         GPIO_PIN_SET(gpio, pin, pin_value);
 1545         DELAY(delay_prop[2] * 5);
 1546 
 1547         return (0);
 1548 }
 1549 
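      /*
       * Enable the "stmmaceth" clock and de-assert the "stmmaceth" hardware
       * reset when the device tree provides them; a missing clock is only
       * reported, and a missing reset is silently ignored.
       */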
 1550 static int
 1551 dwc_clock_init(device_t dev)
 1552 {
 1553         hwreset_t rst;
 1554         clk_t clk;
 1555         int error;
 1556         int64_t freq;
 1557 
 1558         /* Enable clocks */
 1559         if (clk_get_by_ofw_name(dev, 0, "stmmaceth", &clk) == 0) {
 1560                 error = clk_enable(clk);
 1561                 if (error != 0) {
 1562                         device_printf(dev, "could not enable main clock\n");
 1563                         return (error);
 1564                 }
 1565                 if (bootverbose) {
 1566                         clk_get_freq(clk, &freq);
 1567                         device_printf(dev, "MAC clock(%s) freq: %jd\n",
 1568                                         clk_get_name(clk), (intmax_t)freq);
 1569                 }
 1570         }
 1571         else {
 1572                 device_printf(dev, "could not find clock stmmaceth\n");
 1573         }
 1574 
 1575         /* De-assert reset */
 1576         if (hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst) == 0) {
 1577                 error = hwreset_deassert(rst);
 1578                 if (error != 0) {
 1579                         device_printf(dev, "could not de-assert reset\n");
 1580                         return (error);
 1581                 }
 1582         }
 1583 
 1584         return (0);
 1585 }
 1586 
 1587 static int
 1588 dwc_probe(device_t dev)
 1589 {
 1590 
 1591         if (!ofw_bus_status_okay(dev))
 1592                 return (ENXIO);
 1593 
 1594         if (!ofw_bus_is_compatible(dev, "snps,dwmac"))
 1595                 return (ENXIO);
 1596 
 1597         device_set_desc(dev, "Gigabit Ethernet Controller");
 1598         return (BUS_PROBE_DEFAULT);
 1599 }
 1600 
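      /*
       * Attach: parse the FDT properties (MII connection type and DMA
       * burst-length tuning), run the platform init hook, enable clocks and
       * resets, allocate bus resources, read the MAC address before
       * resetting, reset the PHY and then soft-reset the MAC, program the
       * DMA bus mode, set up the descriptor rings, install the interrupt
       * handler, and finally create the ifnet and attach the miibus and the
       * Ethernet interface.
       */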
 1601 static int
 1602 dwc_attach(device_t dev)
 1603 {
 1604         uint8_t macaddr[ETHER_ADDR_LEN];
 1605         struct dwc_softc *sc;
 1606         if_t ifp;
 1607         int error, i;
 1608         uint32_t reg;
 1609         phandle_t node;
 1610         uint32_t txpbl, rxpbl, pbl;
 1611         bool nopblx8 = false;
 1612         bool fixed_burst = false;
 1613 
 1614         sc = device_get_softc(dev);
 1615         sc->dev = dev;
 1616         sc->rx_idx = 0;
 1617         sc->tx_desccount = TX_DESC_COUNT;
 1618         sc->tx_mapcount = 0;
 1619         sc->mii_clk = IF_DWC_MII_CLK(dev);
 1620         sc->mactype = IF_DWC_MAC_TYPE(dev);
 1621 
 1622         node = ofw_bus_get_node(dev);
 1623         switch (mii_fdt_get_contype(node)) {
 1624         case MII_CONTYPE_RGMII:
 1625         case MII_CONTYPE_RGMII_ID:
 1626         case MII_CONTYPE_RGMII_RXID:
 1627         case MII_CONTYPE_RGMII_TXID:
 1628                 sc->phy_mode = PHY_MODE_RGMII;
 1629                 break;
 1630         case MII_CONTYPE_RMII:
 1631                 sc->phy_mode = PHY_MODE_RMII;
 1632                 break;
 1633         case MII_CONTYPE_MII:
 1634                 sc->phy_mode = PHY_MODE_MII;
 1635                 break;
 1636         default:
 1637                 device_printf(dev, "Unsupported MII type\n");
 1638                 return (ENXIO);
 1639         }
 1640 
 1641         if (OF_getencprop(node, "snps,pbl", &pbl, sizeof(uint32_t)) <= 0)
 1642                 pbl = BUS_MODE_DEFAULT_PBL;
 1643         if (OF_getencprop(node, "snps,txpbl", &txpbl, sizeof(uint32_t)) <= 0)
 1644                 txpbl = pbl;
 1645         if (OF_getencprop(node, "snps,rxpbl", &rxpbl, sizeof(uint32_t)) <= 0)
 1646                 rxpbl = pbl;
 1647         if (OF_hasprop(node, "snps,no-pbl-x8") == 1)
 1648                 nopblx8 = true;
 1649         if (OF_hasprop(node, "snps,fixed-burst") == 1)
 1650                 fixed_burst = true;
 1651 
 1652         if (IF_DWC_INIT(dev) != 0)
 1653                 return (ENXIO);
 1654 
 1655         if (dwc_clock_init(dev) != 0)
 1656                 return (ENXIO);
 1657 
 1658         if (bus_alloc_resources(dev, dwc_spec, sc->res)) {
 1659                 device_printf(dev, "could not allocate resources\n");
 1660                 return (ENXIO);
 1661         }
 1662 
 1663         /* Read the MAC address before resetting the controller. */
 1664         dwc_get_hwaddr(sc, macaddr);
 1665 
 1666         /* Reset the PHY if needed */
 1667         if (dwc_reset(dev) != 0) {
 1668                 device_printf(dev, "Can't reset the PHY\n");
 1669                 bus_release_resources(dev, dwc_spec, sc->res);
 1670                 return (ENXIO);
 1671         }
 1672 
 1673         /* Soft-reset the core and wait for the reset to complete. */
 1674         reg = READ4(sc, BUS_MODE);
 1675         reg |= (BUS_MODE_SWR);
 1676         WRITE4(sc, BUS_MODE, reg);
 1677 
 1678         for (i = 0; i < MAC_RESET_TIMEOUT; i++) {
 1679                 if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
 1680                         break;
 1681                 DELAY(10);
 1682         }
 1683         if (i >= MAC_RESET_TIMEOUT) {
 1684                 device_printf(sc->dev, "Can't reset DWC.\n");
 1685                 bus_release_resources(dev, dwc_spec, sc->res);
 1686                 return (ENXIO);
 1687         }
 1688 
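              /*
               * Program the DMA bus mode: separate RX/TX burst lengths (USP),
               * the 8x PBL multiplier unless "snps,no-pbl-x8" is set, the TX
               * and RX programmable burst lengths, and fixed-burst transfers
               * if "snps,fixed-burst" was given.
               */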
 1689         reg = BUS_MODE_USP;
 1690         if (!nopblx8)
 1691                 reg |= BUS_MODE_EIGHTXPBL;
 1692         reg |= (txpbl << BUS_MODE_PBL_SHIFT);
 1693         reg |= (rxpbl << BUS_MODE_RPBL_SHIFT);
 1694         if (fixed_burst)
 1695                 reg |= BUS_MODE_FIXEDBURST;
 1696 
 1697         WRITE4(sc, BUS_MODE, reg);
 1698 
 1699         /*
 1700          * DMA must be stopped while changing descriptor list addresses.
 1701          */
 1702         reg = READ4(sc, OPERATION_MODE);
 1703         reg &= ~(MODE_ST | MODE_SR);
 1704         WRITE4(sc, OPERATION_MODE, reg);
 1705 
 1706         if (setup_dma(sc)) {
 1707                 bus_release_resources(dev, dwc_spec, sc->res);
 1708                 return (ENXIO);
 1709         }
 1710 
 1711         /* Point the DMA engine at the RX and TX descriptor rings. */
 1712         WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);
 1713         WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);
 1714 
 1715         mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
 1716             MTX_NETWORK_LOCK, MTX_DEF);
 1717 
 1718         callout_init_mtx(&sc->dwc_callout, &sc->mtx, 0);
 1719 
 1720         /* Setup interrupt handler. */
 1721         error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
 1722             NULL, dwc_intr, sc, &sc->intr_cookie);
 1723         if (error != 0) {
 1724                 device_printf(dev, "could not setup interrupt handler.\n");
 1725                 bus_release_resources(dev, dwc_spec, sc->res);
 1726                 return (ENXIO);
 1727         }
 1728 
 1729         /* Set up the ethernet interface. */
 1730         sc->ifp = ifp = if_alloc(IFT_ETHER);
 1731 
 1732         if_setsoftc(ifp, sc);
 1733         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 1734         if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
 1735         if_setstartfn(ifp, dwc_txstart);
 1736         if_setioctlfn(ifp, dwc_ioctl);
 1737         if_setinitfn(ifp, dwc_init);
 1738         if_setsendqlen(ifp, TX_MAP_COUNT - 1);
 1739         if_setsendqready(sc->ifp);
 1740         if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
 1741         if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
 1742         if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
 1743 
 1744         /* Attach the mii driver. */
 1745         error = mii_attach(dev, &sc->miibus, ifp, dwc_media_change,
 1746             dwc_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY,
 1747             MII_OFFSET_ANY, 0);
 1748 
 1749         if (error != 0) {
 1750                 device_printf(dev, "PHY attach failed\n");
 1751                 bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
 1752                 bus_release_resources(dev, dwc_spec, sc->res);
 1753                 return (ENXIO);
 1754         }
 1755         sc->mii_softc = device_get_softc(sc->miibus);
 1756 
 1757         /* All ready to run, attach the ethernet interface. */
 1758         ether_ifattach(ifp, macaddr);
 1759         sc->is_attached = true;
 1760 
 1761         return (0);
 1762 }
 1763 
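      /*
       * Detach in roughly the reverse order of attach: quiesce and tear down
       * the interrupt, stop the MAC under the lock, drain the callout, detach
       * the ifnet and the miibus child, then free the DMA rings, the bus
       * resources, and the mutex.
       */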
 1764 static int
 1765 dwc_detach(device_t dev)
 1766 {
 1767         struct dwc_softc *sc;
 1768 
 1769         sc = device_get_softc(dev);
 1770 
 1771         /*
 1772          * Disable and tear down interrupts before anything else, so we don't
 1773          * race with the handler.
 1774          */
 1775         WRITE4(sc, INTERRUPT_ENABLE, 0);
 1776         if (sc->intr_cookie != NULL) {
 1777                 bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
 1778         }
 1779 
 1780         if (sc->is_attached) {
 1781                 DWC_LOCK(sc);
 1782                 sc->is_detaching = true;
 1783                 dwc_stop_locked(sc);
 1784                 DWC_UNLOCK(sc);
 1785                 callout_drain(&sc->dwc_callout);
 1786                 ether_ifdetach(sc->ifp);
 1787         }
 1788 
 1789         if (sc->miibus != NULL) {
 1790                 device_delete_child(dev, sc->miibus);
 1791                 sc->miibus = NULL;
 1792         }
 1793         bus_generic_detach(dev);
 1794 
 1795         /* Free DMA descriptors */
 1796         free_dma(sc);
 1797 
 1798         if (sc->ifp != NULL) {
 1799                 if_free(sc->ifp);
 1800                 sc->ifp = NULL;
 1801         }
 1802 
 1803         bus_release_resources(dev, dwc_spec, sc->res);
 1804 
 1805         mtx_destroy(&sc->mtx);
 1806         return (0);
 1807 }
 1808 
 1809 static device_method_t dwc_methods[] = {
 1810         DEVMETHOD(device_probe,         dwc_probe),
 1811         DEVMETHOD(device_attach,        dwc_attach),
 1812         DEVMETHOD(device_detach,        dwc_detach),
 1813 
 1814         /* MII Interface */
 1815         DEVMETHOD(miibus_readreg,       dwc_miibus_read_reg),
 1816         DEVMETHOD(miibus_writereg,      dwc_miibus_write_reg),
 1817         DEVMETHOD(miibus_statchg,       dwc_miibus_statchg),
 1818 
 1819         { 0, 0 }
 1820 };
 1821 
 1822 driver_t dwc_driver = {
 1823         "dwc",
 1824         dwc_methods,
 1825         sizeof(struct dwc_softc),
 1826 };
 1827 
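      /*
       * The driver attaches under simplebus to nodes matched by dwc_probe(),
       * and each instance hosts a miibus child for its PHY.
       */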
 1828 DRIVER_MODULE(dwc, simplebus, dwc_driver, 0, 0);
 1829 DRIVER_MODULE(miibus, dwc, miibus_driver, 0, 0);
 1830 
 1831 MODULE_DEPEND(dwc, ether, 1, 1, 1);
 1832 MODULE_DEPEND(dwc, miibus, 1, 1, 1);
