
FreeBSD/Linux Kernel Cross Reference
sys/dev/rt/if_rt.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2015-2016, Stanislav Galabov
    5  * Copyright (c) 2014, Aleksandr A. Mityaev
    6  * Copyright (c) 2011, Aleksandr Rybalko
    7  * based on hard work
    8  * by Alexander Egorenkov <egorenar@gmail.com>
    9  * and by Damien Bergamini <damien.bergamini@free.fr>
   10  * All rights reserved.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice unmodified, this list of conditions, and the following
   17  *    disclaimer.
   18  * 2. Redistributions in binary form must reproduce the above copyright
   19  *    notice, this list of conditions and the following disclaimer in the
   20  *    documentation and/or other materials provided with the distribution.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD$");
   37 
   38 #include "if_rtvar.h"
   39 #include "if_rtreg.h"
   40 
   41 #include <sys/kenv.h>
   42 
   43 #include <net/if.h>
   44 #include <net/if_var.h>
   45 #include <net/if_arp.h>
   46 #include <net/ethernet.h>
   47 #include <net/if_dl.h>
   48 #include <net/if_media.h>
   49 #include <net/if_types.h>
   50 #include <net/if_vlan_var.h>
   51 
   52 #include <net/bpf.h>
   53 
   54 #include <machine/bus.h>
   55 #include <machine/cache.h>
   56 #include <machine/cpufunc.h>
   57 #include <machine/resource.h>
   58 #include <vm/vm_param.h>
   59 #include <vm/vm.h>
   60 #include <vm/pmap.h>
   61 #include <machine/pmap.h>
   62 #include <sys/bus.h>
   63 #include <sys/rman.h>
   64 
   65 #include "opt_platform.h"
   66 #include "opt_rt305x.h"
   67 
   68 #ifdef FDT
   69 #include <dev/ofw/openfirm.h>
   70 #include <dev/ofw/ofw_bus.h>
   71 #include <dev/ofw/ofw_bus_subr.h>
   72 #endif
   73 
   74 #include <dev/mii/mii.h>
   75 #include <dev/mii/miivar.h>
   76 
   77 #ifdef RT_MDIO
   78 #include <dev/mdio/mdio.h>
   79 #include <dev/etherswitch/miiproxy.h>
   80 #include "mdio_if.h"
   81 #endif
   82 
   83 #if 0
   84 #include <mips/rt305x/rt305x_sysctlvar.h>
   85 #include <mips/rt305x/rt305xreg.h>
   86 #endif
   87 
   88 #ifdef IF_RT_PHY_SUPPORT
   89 #include "miibus_if.h"
   90 #endif
   91 
   92 /*
   93  * Defines and macros
   94  */
   95 #define RT_MAX_AGG_SIZE                 3840
   96 
   97 #define RT_TX_DATA_SEG0_SIZE            MJUMPAGESIZE
   98 
   99 #define RT_MS(_v, _f)                   (((_v) & _f) >> _f##_S)
  100 #define RT_SM(_v, _f)                   (((_v) << _f##_S) & _f)
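
/*
 * Illustrative example (not from the original source): RT_MS extracts a
 * register bitfield using its mask _f and the companion shift constant
 * _f##_S, while RT_SM packs a value back into field position.  For a
 * hypothetical field FOO with mask 0x0000ff00 and FOO_S = 8:
 *
 *   RT_MS(0x1234, FOO) == 0x12    -- (0x1234 & 0xff00) >> 8
 *   RT_SM(0x12, FOO)   == 0x1200  -- (0x12 << 8) & 0xff00
 */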
  101 
  102 #define RT_TX_WATCHDOG_TIMEOUT          5
  103 
  104 #define RT_CHIPID_RT2880 0x2880
  105 #define RT_CHIPID_RT3050 0x3050
  106 #define RT_CHIPID_RT3883 0x3883
  107 #define RT_CHIPID_RT5350 0x5350
  108 #define RT_CHIPID_MT7620 0x7620
  109 #define RT_CHIPID_MT7621 0x7621
  110 
  111 #ifdef FDT
   112 /* more specific and newer models should go first */
  113 static const struct ofw_compat_data rt_compat_data[] = {
  114         { "ralink,rt2880-eth",          RT_CHIPID_RT2880 },
  115         { "ralink,rt3050-eth",          RT_CHIPID_RT3050 },
  116         { "ralink,rt3352-eth",          RT_CHIPID_RT3050 },
  117         { "ralink,rt3883-eth",          RT_CHIPID_RT3883 },
  118         { "ralink,rt5350-eth",          RT_CHIPID_RT5350 },
  119         { "ralink,mt7620a-eth",         RT_CHIPID_MT7620 },
  120         { "mediatek,mt7620-eth",        RT_CHIPID_MT7620 },
  121         { "ralink,mt7621-eth",          RT_CHIPID_MT7621 },
  122         { "mediatek,mt7621-eth",        RT_CHIPID_MT7621 },
  123         { NULL,                         0 }
  124 };
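
/*
 * Note (descriptive, not in the original): rt_probe() matches these entries
 * against the FDT "compatible" property via ofw_bus_search_compatible(),
 * which returns the first matching row; e.g. a node with
 * compatible = "ralink,rt5350-eth" yields ocd_data == RT_CHIPID_RT5350.
 * Hence the ordering rule above.
 */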
  125 #endif
  126 
  127 /*
  128  * Static function prototypes
  129  */
  130 static int      rt_probe(device_t dev);
  131 static int      rt_attach(device_t dev);
  132 static int      rt_detach(device_t dev);
  133 static int      rt_shutdown(device_t dev);
  134 static int      rt_suspend(device_t dev);
  135 static int      rt_resume(device_t dev);
  136 static void     rt_init_locked(void *priv);
  137 static void     rt_init(void *priv);
  138 static void     rt_stop_locked(void *priv);
  139 static void     rt_stop(void *priv);
  140 static void     rt_start(struct ifnet *ifp);
  141 static int      rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
  142 static void     rt_tx_watchdog(void *arg);
  143 static void     rt_intr(void *arg);
  144 static void     rt_rt5350_intr(void *arg);
  145 static void     rt_tx_coherent_intr(struct rt_softc *sc);
  146 static void     rt_rx_coherent_intr(struct rt_softc *sc);
  147 static void     rt_rx_delay_intr(struct rt_softc *sc);
  148 static void     rt_tx_delay_intr(struct rt_softc *sc);
  149 static void     rt_rx_intr(struct rt_softc *sc, int qid);
  150 static void     rt_tx_intr(struct rt_softc *sc, int qid);
  151 static void     rt_rx_done_task(void *context, int pending);
  152 static void     rt_tx_done_task(void *context, int pending);
  153 static void     rt_periodic_task(void *context, int pending);
  154 static int      rt_rx_eof(struct rt_softc *sc,
  155                     struct rt_softc_rx_ring *ring, int limit);
  156 static void     rt_tx_eof(struct rt_softc *sc,
  157                     struct rt_softc_tx_ring *ring);
  158 static void     rt_update_stats(struct rt_softc *sc);
  159 static void     rt_watchdog(struct rt_softc *sc);
  160 static void     rt_update_raw_counters(struct rt_softc *sc);
  161 static void     rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
  162 static void     rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
  163 static int      rt_txrx_enable(struct rt_softc *sc);
  164 static int      rt_alloc_rx_ring(struct rt_softc *sc,
  165                     struct rt_softc_rx_ring *ring, int qid);
  166 static void     rt_reset_rx_ring(struct rt_softc *sc,
  167                     struct rt_softc_rx_ring *ring);
  168 static void     rt_free_rx_ring(struct rt_softc *sc,
  169                     struct rt_softc_rx_ring *ring);
  170 static int      rt_alloc_tx_ring(struct rt_softc *sc,
  171                     struct rt_softc_tx_ring *ring, int qid);
  172 static void     rt_reset_tx_ring(struct rt_softc *sc,
  173                     struct rt_softc_tx_ring *ring);
  174 static void     rt_free_tx_ring(struct rt_softc *sc,
  175                     struct rt_softc_tx_ring *ring);
  176 static void     rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
  177                     int nseg, int error);
  178 static void     rt_sysctl_attach(struct rt_softc *sc);
  179 #ifdef IF_RT_PHY_SUPPORT
  180 void            rt_miibus_statchg(device_t);
  181 #endif
  182 #if defined(IF_RT_PHY_SUPPORT) || defined(RT_MDIO)
  183 static int      rt_miibus_readreg(device_t, int, int);
  184 static int      rt_miibus_writereg(device_t, int, int, int);
  185 #endif
  186 static int      rt_ifmedia_upd(struct ifnet *);
  187 static void     rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  188 
  189 static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
  190     "RT driver parameters");
  191 #ifdef IF_RT_DEBUG
  192 static int rt_debug = 0;
  193 SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
  194     "RT debug level");
  195 #endif
  196 
  197 static int
  198 rt_probe(device_t dev)
  199 {
  200         struct rt_softc *sc = device_get_softc(dev);
  201         char buf[80];
  202 #ifdef FDT
  203         const struct ofw_compat_data * cd;
  204 
  205         cd = ofw_bus_search_compatible(dev, rt_compat_data);
  206         if (cd->ocd_data == 0)
  207                 return (ENXIO);
  208                 
  209         sc->rt_chipid = (unsigned int)(cd->ocd_data);
  210 #else
  211 #if defined(MT7620)
  212         sc->rt_chipid = RT_CHIPID_MT7620;
  213 #elif defined(MT7621)
  214         sc->rt_chipid = RT_CHIPID_MT7621;
  215 #elif defined(RT5350)
  216         sc->rt_chipid = RT_CHIPID_RT5350;
  217 #else
  218         sc->rt_chipid = RT_CHIPID_RT3050;
  219 #endif
  220 #endif
  221         snprintf(buf, sizeof(buf), "Ralink %cT%x onChip Ethernet driver",
  222                 sc->rt_chipid >= 0x7600 ? 'M' : 'R', sc->rt_chipid);
  223         device_set_desc_copy(dev, buf);
  224         return (BUS_PROBE_GENERIC);
  225 }
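
/*
 * Worked example of the probe description string above (illustrative):
 * rt_chipid 0x3050 produces "Ralink RT3050 onChip Ethernet driver", while
 * 0x7621 (>= 0x7600) selects the 'M' prefix and produces
 * "Ralink MT7621 onChip Ethernet driver".
 */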
  226 
  227 /*
  228  * macaddr_atoi - translate string MAC address to uint8_t array
  229  */
  230 static int
  231 macaddr_atoi(const char *str, uint8_t *mac)
  232 {
  233         int count, i;
  234         unsigned int amac[ETHER_ADDR_LEN];      /* Aligned version */
  235 
  236         count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
  237             &amac[0], &amac[1], &amac[2],
  238             &amac[3], &amac[4], &amac[5]);
  239         if (count < ETHER_ADDR_LEN) {
  240                 memset(mac, 0, ETHER_ADDR_LEN);
  241                 return (1);
  242         }
  243 
  244         /* Copy aligned to result */
  245         for (i = 0; i < ETHER_ADDR_LEN; i ++)
  246                 mac[i] = (amac[i] & 0xff);
  247 
  248         return (0);
  249 }
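
#if 0
/*
 * Usage sketch (illustrative only, not part of the driver): macaddr_atoi()
 * returns 0 on success and 1 (with the output zeroed) when fewer than
 * ETHER_ADDR_LEN hex octets could be scanned.
 */
static void
macaddr_atoi_example(void)
{
	uint8_t mac[ETHER_ADDR_LEN];

	if (macaddr_atoi("00:18:e7:d5:83:90", mac) == 0) {
		/* mac[] now holds { 0x00, 0x18, 0xe7, 0xd5, 0x83, 0x90 } */
	}
}
#endif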
  250 
  251 #ifdef USE_GENERATED_MAC_ADDRESS
   252 /*
   253  * generate_mac(uint8_t *mac)
   254  * MAC address generator for cases when the real device MAC address is
   255  * unknown or not yet accessible.
   256  * Uses the 'b','s','d' signature plus 3 octets from a CRC32 over kenv:
   257  * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
   258  *
   259  * The output MAC address does not change between reboots as long as the
   260  * hints and bootloader info are unchanged.
   261  */
  262 static void
  263 generate_mac(uint8_t *mac)
  264 {
  265         unsigned char *cp;
  266         int i = 0;
  267         uint32_t crc = 0xffffffff;
  268 
  269         /* Generate CRC32 on kenv */
  270         for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
  271                 crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
  272         }
  273         crc = ~crc;
  274 
  275         mac[0] = 'b';
  276         mac[1] = 's';
  277         mac[2] = 'd';
  278         mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
  279         mac[4] = (crc >> 8) & 0xff;
  280         mac[5] = crc & 0xff;
  281 }
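
/*
 * Worked example (illustrative): if the CRC32 computed over the kernel
 * environment came out as 0xa1b2c3d4, the generated address would be
 * 62:73:64:13:c3:d4 ('b' = 0x62, 's' = 0x73, 'd' = 0x64,
 * 0xa1 ^ 0xb2 = 0x13, then 0xc3 and 0xd4).
 */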
  282 #endif
  283 
  284 /*
   285  * ether_request_mac - try to find a usable MAC address.
  286  */
  287 static int
  288 ether_request_mac(device_t dev, uint8_t *mac)
  289 {
  290         const char *var;
  291 
  292         /*
  293          * "ethaddr" is passed via envp on RedBoot platforms
  294          * "kmac" is passed via argv on RouterBOOT platforms
  295          */
  296 #if defined(RT305X_UBOOT) ||  defined(__REDBOOT__) || defined(__ROUTERBOOT__)
  297         if ((var = kern_getenv("ethaddr")) != NULL ||
  298             (var = kern_getenv("kmac")) != NULL ) {
  299                 if(!macaddr_atoi(var, mac)) {
  300                         printf("%s: use %s macaddr from KENV\n",
  301                             device_get_nameunit(dev), var);
  302                         freeenv(var);
  303                         return (0);
  304                 }
  305                 freeenv(var);
  306         }
  307 #endif
  308 
  309         /*
  310          * Try from hints
  311          * hint.[dev].[unit].macaddr
  312          */
  313         if (!resource_string_value(device_get_name(dev),
  314             device_get_unit(dev), "macaddr", &var)) {
  315                 if(!macaddr_atoi(var, mac)) {
  316                         printf("%s: use %s macaddr from hints\n",
  317                             device_get_nameunit(dev), var);
  318                         return (0);
  319                 }
  320         }
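
        /*
         * For example (hypothetical unit number), a line in
         * /boot/device.hints such as
         *   hint.rt.0.macaddr="00:11:22:33:44:55"
         * would be picked up by the lookup above.
         */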
  321 
  322 #ifdef USE_GENERATED_MAC_ADDRESS
  323         generate_mac(mac);
  324 
  325         device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
  326             "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
  327 #else
  328         /* Hardcoded */
  329         mac[0] = 0x00;
  330         mac[1] = 0x18;
  331         mac[2] = 0xe7;
  332         mac[3] = 0xd5;
  333         mac[4] = 0x83;
  334         mac[5] = 0x90;
  335 
  336         device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
  337 #endif
  338 
  339         return (0);
  340 }
  341 
  342 /*
  343  * Reset hardware
  344  */
  345 static void
  346 reset_freng(struct rt_softc *sc)
  347 {
  348         /* XXX hard reset kills everything so skip it ... */
  349         return;
  350 }
  351 
  352 static int
  353 rt_attach(device_t dev)
  354 {
  355         struct rt_softc *sc;
  356         struct ifnet *ifp;
  357         int error, i;
  358 #ifdef FDT
  359         phandle_t node;
  360         char fdtval[32];
  361 #endif
  362 
  363         sc = device_get_softc(dev);
  364         sc->dev = dev;
  365 
  366 #ifdef FDT
  367         node = ofw_bus_get_node(sc->dev);
  368 #endif
  369 
  370         mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  371             MTX_DEF | MTX_RECURSE);
  372 
  373         sc->mem_rid = 0;
  374         sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
  375             RF_ACTIVE | RF_SHAREABLE);
  376         if (sc->mem == NULL) {
  377                 device_printf(dev, "could not allocate memory resource\n");
  378                 error = ENXIO;
  379                 goto fail;
  380         }
  381 
  382         sc->bst = rman_get_bustag(sc->mem);
  383         sc->bsh = rman_get_bushandle(sc->mem);
  384 
  385         sc->irq_rid = 0;
  386         sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
  387             RF_ACTIVE);
  388         if (sc->irq == NULL) {
  389                 device_printf(dev,
  390                     "could not allocate interrupt resource\n");
  391                 error = ENXIO;
  392                 goto fail;
  393         }
  394 
  395 #ifdef IF_RT_DEBUG
  396         sc->debug = rt_debug;
  397 
  398         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
  399                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
  400                 "debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
  401 #endif
  402 
  403         /* Reset hardware */
  404         reset_freng(sc);
  405 
  406         if (sc->rt_chipid == RT_CHIPID_MT7620) {
  407                 sc->csum_fail_ip = MT7620_RXD_SRC_IP_CSUM_FAIL;
  408                 sc->csum_fail_l4 = MT7620_RXD_SRC_L4_CSUM_FAIL;
  409         } else if (sc->rt_chipid == RT_CHIPID_MT7621) {
  410                 sc->csum_fail_ip = MT7621_RXD_SRC_IP_CSUM_FAIL;
  411                 sc->csum_fail_l4 = MT7621_RXD_SRC_L4_CSUM_FAIL;
  412         } else {
  413                 sc->csum_fail_ip = RT305X_RXD_SRC_IP_CSUM_FAIL;
  414                 sc->csum_fail_l4 = RT305X_RXD_SRC_L4_CSUM_FAIL;
  415         }
  416 
  417         /* Fill in soc-specific registers map */
  418         switch(sc->rt_chipid) {
  419           case RT_CHIPID_MT7620:
  420           case RT_CHIPID_MT7621:
  421                 sc->gdma1_base = MT7620_GDMA1_BASE;
  422                 /* fallthrough */
  423           case RT_CHIPID_RT5350:
  424                 device_printf(dev, "%cT%x Ethernet MAC (rev 0x%08x)\n",
  425                         sc->rt_chipid >= 0x7600 ? 'M' : 'R',
  426                         sc->rt_chipid, sc->mac_rev);
  427                 /* RT5350: No GDMA, PSE, CDMA, PPE */
  428                 RT_WRITE(sc, GE_PORT_BASE + 0x0C00, // UDPCS, TCPCS, IPCS=1
  429                         RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7<<16));
  430                 sc->delay_int_cfg=RT5350_PDMA_BASE+RT5350_DELAY_INT_CFG;
  431                 sc->fe_int_status=RT5350_FE_INT_STATUS;
  432                 sc->fe_int_enable=RT5350_FE_INT_ENABLE;
  433                 sc->pdma_glo_cfg=RT5350_PDMA_BASE+RT5350_PDMA_GLO_CFG;
  434                 sc->pdma_rst_idx=RT5350_PDMA_BASE+RT5350_PDMA_RST_IDX;
  435                 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
  436                   sc->tx_base_ptr[i]=RT5350_PDMA_BASE+RT5350_TX_BASE_PTR(i);
  437                   sc->tx_max_cnt[i]=RT5350_PDMA_BASE+RT5350_TX_MAX_CNT(i);
  438                   sc->tx_ctx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_CTX_IDX(i);
  439                   sc->tx_dtx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_DTX_IDX(i);
  440                 }
  441                 sc->rx_ring_count=2;
  442                 sc->rx_base_ptr[0]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR0;
  443                 sc->rx_max_cnt[0]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT0;
  444                 sc->rx_calc_idx[0]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX0;
  445                 sc->rx_drx_idx[0]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX0;
  446                 sc->rx_base_ptr[1]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR1;
  447                 sc->rx_max_cnt[1]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT1;
  448                 sc->rx_calc_idx[1]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX1;
  449                 sc->rx_drx_idx[1]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX1;
  450                 sc->int_rx_done_mask=RT5350_INT_RXQ0_DONE;
  451                 sc->int_tx_done_mask=RT5350_INT_TXQ0_DONE;
  452                 break;
  453           default:
  454                 device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
  455                         sc->mac_rev);
  456                 sc->gdma1_base = GDMA1_BASE;
  457                 sc->delay_int_cfg=PDMA_BASE+DELAY_INT_CFG;
  458                 sc->fe_int_status=GE_PORT_BASE+FE_INT_STATUS;
  459                 sc->fe_int_enable=GE_PORT_BASE+FE_INT_ENABLE;
  460                 sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG;
  461                 sc->pdma_rst_idx=PDMA_BASE+PDMA_RST_IDX;
  462                 for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
  463                   sc->tx_base_ptr[i]=PDMA_BASE+TX_BASE_PTR(i);
  464                   sc->tx_max_cnt[i]=PDMA_BASE+TX_MAX_CNT(i);
  465                   sc->tx_ctx_idx[i]=PDMA_BASE+TX_CTX_IDX(i);
  466                   sc->tx_dtx_idx[i]=PDMA_BASE+TX_DTX_IDX(i);
  467                 }
  468                 sc->rx_ring_count=1;
  469                 sc->rx_base_ptr[0]=PDMA_BASE+RX_BASE_PTR0;
  470                 sc->rx_max_cnt[0]=PDMA_BASE+RX_MAX_CNT0;
  471                 sc->rx_calc_idx[0]=PDMA_BASE+RX_CALC_IDX0;
  472                 sc->rx_drx_idx[0]=PDMA_BASE+RX_DRX_IDX0;
  473                 sc->int_rx_done_mask=INT_RX_DONE;
  474                 sc->int_tx_done_mask=INT_TXQ0_DONE;
  475         }
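
        /*
         * From here on the driver touches the DMA engine only through the
         * sc->* register offsets filled in above, so the per-SoC layout
         * differences stay confined to this switch.
         */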
  476 
  477         if (sc->gdma1_base != 0)
  478                 RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
  479                 (
  480                 GDM_ICS_EN | /* Enable IP Csum */
  481                 GDM_TCS_EN | /* Enable TCP Csum */
  482                 GDM_UCS_EN | /* Enable UDP Csum */
  483                 GDM_STRPCRC | /* Strip CRC from packet */
  484                 GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
  485                 GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
  486                 GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
  487                 GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
  488                 ));
  489 
  490 #ifdef FDT
  491         if (sc->rt_chipid == RT_CHIPID_RT2880 ||
  492             sc->rt_chipid == RT_CHIPID_RT3883) {
  493                 if (OF_getprop(node, "port-mode", fdtval, sizeof(fdtval)) > 0 &&
  494                     strcmp(fdtval, "gigasw") == 0)
  495                         RT_WRITE(sc, MDIO_CFG, MDIO_2880_GIGA_INIT);
  496                 else
  497                         RT_WRITE(sc, MDIO_CFG, MDIO_2880_100T_INIT);
  498         }
  499 #endif
  500 
  501         /* allocate Tx and Rx rings */
  502         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
  503                 error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
  504                 if (error != 0) {
  505                         device_printf(dev, "could not allocate Tx ring #%d\n",
  506                             i);
  507                         goto fail;
  508                 }
  509         }
  510 
  511         sc->tx_ring_mgtqid = 5;
  512         for (i = 0; i < sc->rx_ring_count; i++) {
  513                 error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i);
  514                 if (error != 0) {
  515                         device_printf(dev, "could not allocate Rx ring\n");
  516                         goto fail;
  517                 }
  518         }
  519 
  520         callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);
  521 
  522         ifp = sc->ifp = if_alloc(IFT_ETHER);
  523         if (ifp == NULL) {
  524                 device_printf(dev, "could not if_alloc()\n");
  525                 error = ENOMEM;
  526                 goto fail;
  527         }
  528 
  529         ifp->if_softc = sc;
  530         if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
  531         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  532         ifp->if_init = rt_init;
  533         ifp->if_ioctl = rt_ioctl;
  534         ifp->if_start = rt_start;
  535 #define RT_TX_QLEN      256
  536 
  537         IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
  538         ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
  539         IFQ_SET_READY(&ifp->if_snd);
  540 
  541 #ifdef IF_RT_PHY_SUPPORT
  542         error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
  543             rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
  544         if (error != 0) {
  545                 device_printf(dev, "attaching PHYs failed\n");
  546                 error = ENXIO;
  547                 goto fail;
  548         }
  549 #else
  550         ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
  551         ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
  552             NULL);
  553         ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
  554 
  555 #endif /* IF_RT_PHY_SUPPORT */
  556 
  557         ether_request_mac(dev, sc->mac_addr);
  558         ether_ifattach(ifp, sc->mac_addr);
  559 
  560         /*
  561          * Tell the upper layer(s) we support long frames.
  562          */
  563         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
  564         ifp->if_capabilities |= IFCAP_VLAN_MTU;
  565         ifp->if_capenable |= IFCAP_VLAN_MTU;
  566         ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
  567         ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
  568 
  569         /* init task queue */
  570         NET_TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
  571         TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
  572 
  573         sc->rx_process_limit = 100;
  574 
  575         sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
  576             taskqueue_thread_enqueue, &sc->taskqueue);
  577 
  578         taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
  579             device_get_nameunit(sc->dev));
  580 
  581         TIMEOUT_TASK_INIT(sc->taskqueue, &sc->periodic_task, 0,
  582             rt_periodic_task, sc);
  583 
  584         rt_sysctl_attach(sc);
  585 
  586         /* set up interrupt */
  587         error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
  588             NULL, (sc->rt_chipid == RT_CHIPID_RT5350 ||
  589             sc->rt_chipid == RT_CHIPID_MT7620 ||
  590             sc->rt_chipid == RT_CHIPID_MT7621) ? rt_rt5350_intr : rt_intr,
  591             sc, &sc->irqh);
  592         if (error != 0) {
  593                 printf("%s: could not set up interrupt\n",
  594                         device_get_nameunit(dev));
  595                 goto fail;
  596         }
  597 #ifdef IF_RT_DEBUG
  598         device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
  599 #endif
  600 
  601         return (0);
  602 
  603 fail:
  604         /* free Tx and Rx rings */
  605         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
  606                 rt_free_tx_ring(sc, &sc->tx_ring[i]);
  607 
  608         for (i = 0; i < sc->rx_ring_count; i++)
  609                 rt_free_rx_ring(sc, &sc->rx_ring[i]);
  610 
  611         mtx_destroy(&sc->lock);
  612 
  613         if (sc->mem != NULL)
  614                 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
  615                     sc->mem);
  616 
  617         if (sc->irq != NULL)
  618                 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
  619                     sc->irq);
  620 
  621         return (error);
  622 }
  623 
  624 /*
  625  * Set media options.
  626  */
  627 static int
  628 rt_ifmedia_upd(struct ifnet *ifp)
  629 {
  630         struct rt_softc *sc;
  631 #ifdef IF_RT_PHY_SUPPORT
  632         struct mii_data *mii;
  633         struct mii_softc *miisc;
  634         int error = 0;
  635 
  636         sc = ifp->if_softc;
  637         RT_SOFTC_LOCK(sc);
  638 
  639         mii = device_get_softc(sc->rt_miibus);
  640         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
  641                 PHY_RESET(miisc);
  642         error = mii_mediachg(mii);
  643         RT_SOFTC_UNLOCK(sc);
  644 
  645         return (error);
  646 
  647 #else /* !IF_RT_PHY_SUPPORT */
  648 
  649         struct ifmedia *ifm;
  650         struct ifmedia_entry *ife;
  651 
  652         sc = ifp->if_softc;
  653         ifm = &sc->rt_ifmedia;
  654         ife = ifm->ifm_cur;
  655 
  656         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
  657                 return (EINVAL);
  658 
  659         if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
  660                 device_printf(sc->dev,
   661                     "AUTO is not supported for multiphy MAC\n");
  662                 return (EINVAL);
  663         }
  664 
  665         /*
  666          * Ignore everything
  667          */
  668         return (0);
  669 #endif /* IF_RT_PHY_SUPPORT */
  670 }
  671 
  672 /*
  673  * Report current media status.
  674  */
  675 static void
  676 rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
  677 {
  678 #ifdef IF_RT_PHY_SUPPORT
  679         struct rt_softc *sc;
  680         struct mii_data *mii;
  681 
  682         sc = ifp->if_softc;
  683 
  684         RT_SOFTC_LOCK(sc);
  685         mii = device_get_softc(sc->rt_miibus);
  686         mii_pollstat(mii);
  687         ifmr->ifm_active = mii->mii_media_active;
  688         ifmr->ifm_status = mii->mii_media_status;
  689         ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
  690         ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
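        /*
         * Note that the polled PHY state above is immediately overridden
         * with a fixed 100baseTX full-duplex report.
         */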
  691         RT_SOFTC_UNLOCK(sc);
  692 #else /* !IF_RT_PHY_SUPPORT */
  693 
  694         ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
  695         ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
  696 #endif /* IF_RT_PHY_SUPPORT */
  697 }
  698 
  699 static int
  700 rt_detach(device_t dev)
  701 {
  702         struct rt_softc *sc;
  703         struct ifnet *ifp;
  704         int i;
  705 
  706         sc = device_get_softc(dev);
  707         ifp = sc->ifp;
  708 
  709         RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");
  710 
  711         RT_SOFTC_LOCK(sc);
  712         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  713         callout_stop(&sc->tx_watchdog_ch);
  714         RT_SOFTC_UNLOCK(sc);
  715 
  716         taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
  717         taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
  718         taskqueue_drain_timeout(sc->taskqueue, &sc->periodic_task);
  719 
  720         /* free Tx and Rx rings */
  721         RT_SOFTC_LOCK(sc);
  722         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
  723                 rt_free_tx_ring(sc, &sc->tx_ring[i]);
  724         for (i = 0; i < sc->rx_ring_count; i++)
  725                 rt_free_rx_ring(sc, &sc->rx_ring[i]);
  726         RT_SOFTC_UNLOCK(sc);
  727 
  728 #ifdef IF_RT_PHY_SUPPORT
  729         if (sc->rt_miibus != NULL)
  730                 device_delete_child(dev, sc->rt_miibus);
  731 #endif
  732 
  733         ether_ifdetach(ifp);
  734         if_free(ifp);
  735 
  736         taskqueue_free(sc->taskqueue);
  737 
  738         mtx_destroy(&sc->lock);
  739 
  740         bus_generic_detach(dev);
  741         bus_teardown_intr(dev, sc->irq, sc->irqh);
  742         bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
  743         bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
  744 
  745         return (0);
  746 }
  747 
  748 static int
  749 rt_shutdown(device_t dev)
  750 {
  751         struct rt_softc *sc;
  752 
  753         sc = device_get_softc(dev);
  754         RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
  755         rt_stop(sc);
  756 
  757         return (0);
  758 }
  759 
  760 static int
  761 rt_suspend(device_t dev)
  762 {
  763         struct rt_softc *sc;
  764 
  765         sc = device_get_softc(dev);
  766         RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
  767         rt_stop(sc);
  768 
  769         return (0);
  770 }
  771 
  772 static int
  773 rt_resume(device_t dev)
  774 {
  775         struct rt_softc *sc;
  776         struct ifnet *ifp;
  777 
  778         sc = device_get_softc(dev);
  779         ifp = sc->ifp;
  780 
  781         RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");
  782 
  783         if (ifp->if_flags & IFF_UP)
  784                 rt_init(sc);
  785 
  786         return (0);
  787 }
  788 
  789 /*
  790  * rt_init_locked - Run initialization process having locked mtx.
  791  */
  792 static void
  793 rt_init_locked(void *priv)
  794 {
  795         struct rt_softc *sc;
  796         struct ifnet *ifp;
  797 #ifdef IF_RT_PHY_SUPPORT
  798         struct mii_data *mii;
  799 #endif
  800         int i, ntries;
  801         uint32_t tmp;
  802 
  803         sc = priv;
  804         ifp = sc->ifp;
  805 #ifdef IF_RT_PHY_SUPPORT
  806         mii = device_get_softc(sc->rt_miibus);
  807 #endif
  808 
  809         RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");
  810 
  811         RT_SOFTC_ASSERT_LOCKED(sc);
  812 
  813         /* hardware reset */
  814         //RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
  815         //rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);
  816 
  817         /* Fwd to CPU (uni|broad|multi)cast and Unknown */
  818         if (sc->gdma1_base != 0)
  819                 RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
  820                 (
  821                 GDM_ICS_EN | /* Enable IP Csum */
  822                 GDM_TCS_EN | /* Enable TCP Csum */
  823                 GDM_UCS_EN | /* Enable UDP Csum */
  824                 GDM_STRPCRC | /* Strip CRC from packet */
  825                 GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
  826                 GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
  827                 GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
  828                 GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
  829                 ));
  830 
  831         /* disable DMA engine */
  832         RT_WRITE(sc, sc->pdma_glo_cfg, 0);
  833         RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff);
  834 
  835         /* wait while DMA engine is busy */
  836         for (ntries = 0; ntries < 100; ntries++) {
  837                 tmp = RT_READ(sc, sc->pdma_glo_cfg);
  838                 if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
  839                         break;
  840                 DELAY(1000);
  841         }
  842 
  843         if (ntries == 100) {
  844                 device_printf(sc->dev, "timeout waiting for DMA engine\n");
  845                 goto fail;
  846         }
  847 
  848         /* reset Rx and Tx rings */
  849         tmp = FE_RST_DRX_IDX0 |
  850                 FE_RST_DTX_IDX3 |
  851                 FE_RST_DTX_IDX2 |
  852                 FE_RST_DTX_IDX1 |
  853                 FE_RST_DTX_IDX0;
  854 
  855         RT_WRITE(sc, sc->pdma_rst_idx, tmp);
  856 
  857         /* XXX switch set mac address */
  858         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
  859                 rt_reset_tx_ring(sc, &sc->tx_ring[i]);
  860 
  861         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
  862                 /* update TX_BASE_PTRx */
  863                 RT_WRITE(sc, sc->tx_base_ptr[i],
  864                         sc->tx_ring[i].desc_phys_addr);
  865                 RT_WRITE(sc, sc->tx_max_cnt[i],
  866                         RT_SOFTC_TX_RING_DESC_COUNT);
  867                 RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
  868         }
  869 
  870         /* init Rx ring */
  871         for (i = 0; i < sc->rx_ring_count; i++)
  872                 rt_reset_rx_ring(sc, &sc->rx_ring[i]);
  873 
  874         /* update RX_BASE_PTRx */
  875         for (i = 0; i < sc->rx_ring_count; i++) {
  876                 RT_WRITE(sc, sc->rx_base_ptr[i],
  877                         sc->rx_ring[i].desc_phys_addr);
  878                 RT_WRITE(sc, sc->rx_max_cnt[i],
  879                         RT_SOFTC_RX_RING_DATA_COUNT);
  880                 RT_WRITE(sc, sc->rx_calc_idx[i],
  881                         RT_SOFTC_RX_RING_DATA_COUNT - 1);
  882         }
  883 
  884         /* write back DDONE, 16byte burst enable RX/TX DMA */
  885         tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN;
  886         if (sc->rt_chipid == RT_CHIPID_MT7620 ||
  887             sc->rt_chipid == RT_CHIPID_MT7621)
   888                 tmp |= (1U << 31);
  889         RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
  890 
  891         /* disable interrupts mitigation */
  892         RT_WRITE(sc, sc->delay_int_cfg, 0);
  893 
  894         /* clear pending interrupts */
  895         RT_WRITE(sc, sc->fe_int_status, 0xffffffff);
  896 
  897         /* enable interrupts */
  898         if (sc->rt_chipid == RT_CHIPID_RT5350 ||
  899             sc->rt_chipid == RT_CHIPID_MT7620 ||
  900             sc->rt_chipid == RT_CHIPID_MT7621)
  901           tmp = RT5350_INT_TX_COHERENT |
  902                 RT5350_INT_RX_COHERENT |
  903                 RT5350_INT_TXQ3_DONE |
  904                 RT5350_INT_TXQ2_DONE |
  905                 RT5350_INT_TXQ1_DONE |
  906                 RT5350_INT_TXQ0_DONE |
  907                 RT5350_INT_RXQ1_DONE |
  908                 RT5350_INT_RXQ0_DONE;
  909         else
  910           tmp = CNT_PPE_AF |
  911                 CNT_GDM_AF |
  912                 PSE_P2_FC |
  913                 GDM_CRC_DROP |
  914                 PSE_BUF_DROP |
  915                 GDM_OTHER_DROP |
  916                 PSE_P1_FC |
  917                 PSE_P0_FC |
  918                 PSE_FQ_EMPTY |
  919                 INT_TX_COHERENT |
  920                 INT_RX_COHERENT |
  921                 INT_TXQ3_DONE |
  922                 INT_TXQ2_DONE |
  923                 INT_TXQ1_DONE |
  924                 INT_TXQ0_DONE |
  925                 INT_RX_DONE;
  926 
  927         sc->intr_enable_mask = tmp;
  928 
  929         RT_WRITE(sc, sc->fe_int_enable, tmp);
  930 
  931         if (rt_txrx_enable(sc) != 0)
  932                 goto fail;
  933 
  934 #ifdef IF_RT_PHY_SUPPORT
  935         if (mii) mii_mediachg(mii);
  936 #endif /* IF_RT_PHY_SUPPORT */
  937 
  938         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  939         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  940 
  941         sc->periodic_round = 0;
  942 
  943         taskqueue_enqueue_timeout(sc->taskqueue, &sc->periodic_task, hz / 10);
  944 
  945         return;
  946 
  947 fail:
  948         rt_stop_locked(sc);
  949 }
  950 
  951 /*
  952  * rt_init - lock and initialize device.
  953  */
  954 static void
  955 rt_init(void *priv)
  956 {
  957         struct rt_softc *sc;
  958 
  959         sc = priv;
  960         RT_SOFTC_LOCK(sc);
  961         rt_init_locked(sc);
  962         RT_SOFTC_UNLOCK(sc);
  963 }
  964 
  965 /*
  966  * rt_stop_locked - stop TX/RX w/ lock
  967  */
  968 static void
  969 rt_stop_locked(void *priv)
  970 {
  971         struct rt_softc *sc;
  972         struct ifnet *ifp;
  973 
  974         sc = priv;
  975         ifp = sc->ifp;
  976 
  977         RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");
  978 
  979         RT_SOFTC_ASSERT_LOCKED(sc);
  980         sc->tx_timer = 0;
  981         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  982         callout_stop(&sc->tx_watchdog_ch);
  983         RT_SOFTC_UNLOCK(sc);
  984         taskqueue_block(sc->taskqueue);
  985 
   986         /*
   987          * rt_stop_locked is sometimes called from the ISR, and a
   988          * taskqueue_drain() there would panic; cancel instead for now.
   989          */
  990 #ifdef notyet
  991         taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
  992         taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
  993         taskqueue_drain_timeout(sc->taskqueue, &sc->periodic_task);
  994 #else
  995         taskqueue_cancel_timeout(sc->taskqueue, &sc->periodic_task, NULL);
  996 #endif
  997         RT_SOFTC_LOCK(sc);
  998 
  999         /* disable interrupts */
 1000         RT_WRITE(sc, sc->fe_int_enable, 0);
 1001 
 1002         if(sc->rt_chipid != RT_CHIPID_RT5350 &&
 1003            sc->rt_chipid != RT_CHIPID_MT7620 &&
 1004            sc->rt_chipid != RT_CHIPID_MT7621) {
 1005                 /* reset adapter */
 1006                 RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
 1007         }
 1008 
 1009         if (sc->gdma1_base != 0)
 1010                 RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
 1011                 (
 1012                 GDM_ICS_EN | /* Enable IP Csum */
 1013                 GDM_TCS_EN | /* Enable TCP Csum */
 1014                 GDM_UCS_EN | /* Enable UDP Csum */
 1015                 GDM_STRPCRC | /* Strip CRC from packet */
 1016                 GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
 1017                 GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
 1018                 GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
 1019                 GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
 1020                 ));
 1021 }
 1022 
 1023 static void
 1024 rt_stop(void *priv)
 1025 {
 1026         struct rt_softc *sc;
 1027 
 1028         sc = priv;
 1029         RT_SOFTC_LOCK(sc);
 1030         rt_stop_locked(sc);
 1031         RT_SOFTC_UNLOCK(sc);
 1032 }
 1033 
 1034 /*
 1035  * rt_tx_data - transmit packet.
 1036  */
 1037 static int
 1038 rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
 1039 {
 1040         struct ifnet *ifp;
 1041         struct rt_softc_tx_ring *ring;
 1042         struct rt_softc_tx_data *data;
 1043         struct rt_txdesc *desc;
 1044         struct mbuf *m_d;
 1045         bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
 1046         int error, ndmasegs, ndescs, i;
 1047 
 1048         KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
 1049                 ("%s: Tx data: invalid qid=%d\n",
 1050                  device_get_nameunit(sc->dev), qid));
 1051 
 1052         RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);
 1053 
 1054         ifp = sc->ifp;
 1055         ring = &sc->tx_ring[qid];
 1056         desc = &ring->desc[ring->desc_cur];
 1057         data = &ring->data[ring->data_cur];
 1058 
 1059         error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
 1060             dma_seg, &ndmasegs, 0);
 1061         if (error != 0) {
 1062                 /* too many fragments, linearize */
 1063 
 1064                 RT_DPRINTF(sc, RT_DEBUG_TX,
 1065                         "could not load mbuf DMA map, trying to linearize "
 1066                         "mbuf: ndmasegs=%d, len=%d, error=%d\n",
 1067                         ndmasegs, m->m_pkthdr.len, error);
 1068 
 1069                 m_d = m_collapse(m, M_NOWAIT, 16);
 1070                 if (m_d == NULL) {
 1071                         m_freem(m);
 1072                         m = NULL;
 1073                         return (ENOMEM);
 1074                 }
 1075                 m = m_d;
 1076 
 1077                 sc->tx_defrag_packets++;
 1078 
 1079                 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
 1080                     data->dma_map, m, dma_seg, &ndmasegs, 0);
 1081                 if (error != 0) {
 1082                         device_printf(sc->dev, "could not load mbuf DMA map: "
 1083                             "ndmasegs=%d, len=%d, error=%d\n",
 1084                             ndmasegs, m->m_pkthdr.len, error);
 1085                         m_freem(m);
 1086                         return (error);
 1087                 }
 1088         }
 1089 
 1090         if (m->m_pkthdr.len == 0)
 1091                 ndmasegs = 0;
 1092 
 1093         /* determine how many Tx descs are required */
 1094         ndescs = 1 + ndmasegs / 2;
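        /*
         * Each Tx descriptor carries up to two DMA segments (sdp0/sdp1), so
         * this rounds the descriptor count up conservatively; e.g.
         * ndmasegs = 5 yields ndescs = 3.
         */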
 1095         if ((ring->desc_queued + ndescs) >
 1096             (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
 1097                 RT_DPRINTF(sc, RT_DEBUG_TX,
 1098                     "there are not enough Tx descs\n");
 1099 
 1100                 sc->no_tx_desc_avail++;
 1101 
 1102                 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 1103                 m_freem(m);
 1104                 return (EFBIG);
 1105         }
 1106 
 1107         data->m = m;
 1108 
 1109         /* set up Tx descs */
 1110         for (i = 0; i < ndmasegs; i += 2) {
 1111                 /* TODO: this needs to be refined as MT7620 for example has
 1112                  * a different word3 layout than RT305x and RT5350 (the last
 1113                  * one doesn't use word3 at all). And so does MT7621...
 1114                  */
 1115 
 1116                 if (sc->rt_chipid != RT_CHIPID_MT7621) {
 1117                         /* Set destination */
 1118                         if (sc->rt_chipid != RT_CHIPID_MT7620)
 1119                             desc->dst = (TXDSCR_DST_PORT_GDMA1);
 1120 
 1121                         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1122                                 desc->dst |= (TXDSCR_IP_CSUM_GEN |
 1123                                     TXDSCR_UDP_CSUM_GEN | TXDSCR_TCP_CSUM_GEN);
 1124                         /* Set queue id */
 1125                         desc->qn = qid;
 1126                         /* No PPPoE */
 1127                         desc->pppoe = 0;
 1128                         /* No VLAN */
 1129                         desc->vid = 0;
 1130                 } else {
 1131                         desc->vid = 0;
 1132                         desc->pppoe = 0;
 1133                         desc->qn = 0;
 1134                         desc->dst = 2;
 1135                 }
 1136 
 1137                 desc->sdp0 = htole32(dma_seg[i].ds_addr);
 1138                 desc->sdl0 = htole16(dma_seg[i].ds_len |
 1139                     ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));
 1140 
 1141                 if ((i+1) < ndmasegs) {
 1142                         desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
 1143                         desc->sdl1 = htole16(dma_seg[i+1].ds_len |
 1144                             ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
 1145                 } else {
 1146                         desc->sdp1 = 0;
 1147                         desc->sdl1 = 0;
 1148                 }
 1149 
 1150                 if ((i+2) < ndmasegs) {
 1151                         ring->desc_queued++;
 1152                         ring->desc_cur = (ring->desc_cur + 1) %
 1153                             RT_SOFTC_TX_RING_DESC_COUNT;
 1154                 }
 1155                 desc = &ring->desc[ring->desc_cur];
 1156         }
 1157 
 1158         RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
 1159             "DMA ds_len=%d/%d/%d/%d/%d\n",
 1160             m->m_pkthdr.len, ndmasegs,
 1161             (int) dma_seg[0].ds_len,
 1162             (int) dma_seg[1].ds_len,
 1163             (int) dma_seg[2].ds_len,
 1164             (int) dma_seg[3].ds_len,
 1165             (int) dma_seg[4].ds_len);
 1166 
 1167         bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
 1168                 BUS_DMASYNC_PREWRITE);
 1169         bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 1170                 BUS_DMASYNC_PREWRITE);
 1171         bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 1172                 BUS_DMASYNC_PREWRITE);
 1173 
 1174         ring->desc_queued++;
 1175         ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;
 1176 
 1177         ring->data_queued++;
 1178         ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;
 1179 
 1180         /* kick Tx */
 1181         RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur);
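        /*
         * Descriptive note: writing the new CPU index to TX_CTX_IDX hands
         * the just-queued descriptors to the PDMA engine, which advances
         * its own TX_DTX_IDX as it completes them (the CTX/DTX handshake is
         * inferred from the register usage in this driver).
         */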
 1182 
 1183         return (0);
 1184 }
 1185 
 1186 /*
  1187  * rt_start - if_start handler: dequeue and transmit pending packets
 1188  */
 1189 static void
 1190 rt_start(struct ifnet *ifp)
 1191 {
 1192         struct rt_softc *sc;
 1193         struct mbuf *m;
 1194         int qid = 0 /* XXX must check QoS priority */;
 1195 
 1196         sc = ifp->if_softc;
 1197 
 1198         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1199                 return;
 1200 
 1201         for (;;) {
 1202                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1203                 if (m == NULL)
 1204                         break;
 1205 
 1206                 m->m_pkthdr.rcvif = NULL;
 1207 
 1208                 RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);
 1209 
 1210                 if (sc->tx_ring[qid].data_queued >=
 1211                     RT_SOFTC_TX_RING_DATA_COUNT) {
 1212                         RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
 1213 
 1214                         RT_DPRINTF(sc, RT_DEBUG_TX,
 1215                             "if_start: Tx ring with qid=%d is full\n", qid);
 1216 
 1217                         m_freem(m);
 1218 
 1219                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1220                         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 1221 
 1222                         sc->tx_data_queue_full[qid]++;
 1223 
 1224                         break;
 1225                 }
 1226 
 1227                 if (rt_tx_data(sc, m, qid) != 0) {
 1228                         RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
 1229 
 1230                         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 1231 
 1232                         break;
 1233                 }
 1234 
 1235                 RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
 1236                 sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
 1237                 callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
 1238         }
 1239 }
 1240 
 1241 /*
  1242  * rt_update_promisc - set/clear promiscuous mode. Not used yet, since
  1243  * filtering is done by the attached Ethernet switch.
 1244  */
 1245 static void
 1246 rt_update_promisc(struct ifnet *ifp)
 1247 {
 1248         struct rt_softc *sc;
 1249 
 1250         sc = ifp->if_softc;
 1251         printf("%s: %s promiscuous mode\n",
 1252                 device_get_nameunit(sc->dev),
 1253                 (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
 1254 }
 1255 
 1256 /*
 1257  * rt_ioctl - ioctl handler.
 1258  */
 1259 static int
 1260 rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1261 {
 1262         struct rt_softc *sc;
 1263         struct ifreq *ifr;
 1264 #ifdef IF_RT_PHY_SUPPORT
 1265         struct mii_data *mii;
 1266 #endif /* IF_RT_PHY_SUPPORT */
 1267         int error, startall;
 1268 
 1269         sc = ifp->if_softc;
 1270         ifr = (struct ifreq *) data;
 1271 
 1272         error = 0;
 1273 
 1274         switch (cmd) {
 1275         case SIOCSIFFLAGS:
 1276                 startall = 0;
 1277                 RT_SOFTC_LOCK(sc);
 1278                 if (ifp->if_flags & IFF_UP) {
 1279                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1280                                 if ((ifp->if_flags ^ sc->if_flags) &
 1281                                     IFF_PROMISC)
 1282                                         rt_update_promisc(ifp);
 1283                         } else {
 1284                                 rt_init_locked(sc);
 1285                                 startall = 1;
 1286                         }
 1287                 } else {
 1288                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1289                                 rt_stop_locked(sc);
 1290                 }
 1291                 sc->if_flags = ifp->if_flags;
 1292                 RT_SOFTC_UNLOCK(sc);
 1293                 break;
 1294         case SIOCGIFMEDIA:
 1295         case SIOCSIFMEDIA:
 1296 #ifdef IF_RT_PHY_SUPPORT
 1297                 mii = device_get_softc(sc->rt_miibus);
 1298                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1299 #else
 1300                 error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
 1301 #endif /* IF_RT_PHY_SUPPORT */
 1302                 break;
 1303         default:
 1304                 error = ether_ioctl(ifp, cmd, data);
 1305                 break;
 1306         }
 1307         return (error);
 1308 }
 1309 
 1310 /*
 1311  * rt_tx_watchdog - Handler of TX Watchdog
 1312  */
 1313 static void
 1314 rt_tx_watchdog(void *arg)
 1315 {
 1316         struct rt_softc *sc;
 1317         struct ifnet *ifp;
 1318 
 1319         sc = arg;
 1320         ifp = sc->ifp;
 1321 
 1322         if (sc->tx_timer == 0)
 1323                 return;
 1324 
 1325         if (--sc->tx_timer == 0) {
 1326                 device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
 1327 #ifdef notyet
 1328                 /*
  1329                  * XXX: Commented out, because the reset breaks input.
 1330                  */
 1331                 rt_stop_locked(sc);
 1332                 rt_init_locked(sc);
 1333 #endif
 1334                 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
 1335                 sc->tx_watchdog_timeouts++;
 1336         }
 1337         callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
 1338 }
 1339 
 1340 /*
 1341  * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
 1342  */
 1343 static void
 1344 rt_cnt_ppe_af(struct rt_softc *sc)
 1345 {
 1346 
 1347         RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
 1348 }
 1349 
 1350 /*
 1351  * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
 1352  */
 1353 static void
 1354 rt_cnt_gdm_af(struct rt_softc *sc)
 1355 {
 1356 
 1357         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1358             "GDMA 1 & 2 Counter Table Almost Full\n");
 1359 }
 1360 
 1361 /*
 1362  * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
 1363  */
 1364 static void
 1365 rt_pse_p2_fc(struct rt_softc *sc)
 1366 {
 1367 
 1368         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1369             "PSE port2 (GDMA 2) flow control asserted.\n");
 1370 }
 1371 
 1372 /*
 1373  * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
 1374  * interrupt
 1375  */
 1376 static void
 1377 rt_gdm_crc_drop(struct rt_softc *sc)
 1378 {
 1379 
 1380         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1381             "GDMA 1 & 2 discard a packet due to CRC error\n");
 1382 }
 1383 
 1384 /*
 1385  * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
 1386  */
 1387 static void
 1388 rt_pse_buf_drop(struct rt_softc *sc)
 1389 {
 1390 
 1391         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1392             "PSE discards a packet due to buffer sharing limitation\n");
 1393 }
 1394 
 1395 /*
 1396  * rt_gdm_other_drop - Handler of discard on other reason interrupt
 1397  */
 1398 static void
 1399 rt_gdm_other_drop(struct rt_softc *sc)
 1400 {
 1401 
 1402         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1403             "GDMA 1 & 2 discard a packet due to other reason\n");
 1404 }
 1405 
 1406 /*
 1407  * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
 1408  */
 1409 static void
 1410 rt_pse_p1_fc(struct rt_softc *sc)
 1411 {
 1412 
 1413         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1414             "PSE port1 (GDMA 1) flow control asserted.\n");
 1415 }
 1416 
 1417 /*
 1418  * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
 1419  */
 1420 static void
 1421 rt_pse_p0_fc(struct rt_softc *sc)
 1422 {
 1423 
 1424         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1425             "PSE port0 (CDMA) flow control asserted.\n");
 1426 }
 1427 
 1428 /*
 1429  * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
 1430  */
 1431 static void
 1432 rt_pse_fq_empty(struct rt_softc *sc)
 1433 {
 1434 
 1435         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1436             "PSE free Q empty threshold reached & forced drop "
 1437                     "condition occurred.\n");
 1438 }
 1439 
 1440 /*
 1441  * rt_intr - main ISR
 1442  */
 1443 static void
 1444 rt_intr(void *arg)
 1445 {
 1446         struct rt_softc *sc;
 1447         struct ifnet *ifp;
 1448         uint32_t status;
 1449 
 1450         sc = arg;
 1451         ifp = sc->ifp;
 1452 
 1453         /* acknowledge interrupts */
 1454         status = RT_READ(sc, sc->fe_int_status);
 1455         RT_WRITE(sc, sc->fe_int_status, status);
 1456 
 1457         RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
 1458 
 1459         if (status == 0xffffffff ||     /* device likely went away */
 1460                 status == 0)            /* not for us */
 1461                 return;
 1462 
 1463         sc->interrupts++;
 1464 
 1465         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1466                 return;
 1467 
 1468         if (status & CNT_PPE_AF)
 1469                 rt_cnt_ppe_af(sc);
 1470 
 1471         if (status & CNT_GDM_AF)
 1472                 rt_cnt_gdm_af(sc);
 1473 
 1474         if (status & PSE_P2_FC)
 1475                 rt_pse_p2_fc(sc);
 1476 
 1477         if (status & GDM_CRC_DROP)
 1478                 rt_gdm_crc_drop(sc);
 1479 
 1480         if (status & PSE_BUF_DROP)
 1481                 rt_pse_buf_drop(sc);
 1482 
 1483         if (status & GDM_OTHER_DROP)
 1484                 rt_gdm_other_drop(sc);
 1485 
 1486         if (status & PSE_P1_FC)
 1487                 rt_pse_p1_fc(sc);
 1488 
 1489         if (status & PSE_P0_FC)
 1490                 rt_pse_p0_fc(sc);
 1491 
 1492         if (status & PSE_FQ_EMPTY)
 1493                 rt_pse_fq_empty(sc);
 1494 
 1495         if (status & INT_TX_COHERENT)
 1496                 rt_tx_coherent_intr(sc);
 1497 
 1498         if (status & INT_RX_COHERENT)
 1499                 rt_rx_coherent_intr(sc);
 1500 
 1501         if (status & RX_DLY_INT)
 1502                 rt_rx_delay_intr(sc);
 1503 
 1504         if (status & TX_DLY_INT)
 1505                 rt_tx_delay_intr(sc);
 1506 
 1507         if (status & INT_RX_DONE)
 1508                 rt_rx_intr(sc, 0);
 1509 
 1510         if (status & INT_TXQ3_DONE)
 1511                 rt_tx_intr(sc, 3);
 1512 
 1513         if (status & INT_TXQ2_DONE)
 1514                 rt_tx_intr(sc, 2);
 1515 
 1516         if (status & INT_TXQ1_DONE)
 1517                 rt_tx_intr(sc, 1);
 1518 
 1519         if (status & INT_TXQ0_DONE)
 1520                 rt_tx_intr(sc, 0);
 1521 }
 1522 
 1523 /*
 1524  * rt_rt5350_intr - main ISR for Ralink 5350 SoC
 1525  */
 1526 static void
 1527 rt_rt5350_intr(void *arg)
 1528 {
 1529         struct rt_softc *sc;
 1530         struct ifnet *ifp;
 1531         uint32_t status;
 1532 
 1533         sc = arg;
 1534         ifp = sc->ifp;
 1535 
 1536         /* acknowledge interrupts */
 1537         status = RT_READ(sc, sc->fe_int_status);
 1538         RT_WRITE(sc, sc->fe_int_status, status);
 1539 
 1540         RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
 1541 
 1542         if (status == 0xffffffff ||     /* device likely went away */
 1543                 status == 0)            /* not for us */
 1544                 return;
 1545 
 1546         sc->interrupts++;
 1547 
 1548         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1549                 return;
 1550 
 1551         if (status & RT5350_INT_TX_COHERENT)
 1552                 rt_tx_coherent_intr(sc);
 1553         if (status & RT5350_INT_RX_COHERENT)
 1554                 rt_rx_coherent_intr(sc);
 1555         if (status & RT5350_RX_DLY_INT)
 1556                 rt_rx_delay_intr(sc);
 1557         if (status & RT5350_TX_DLY_INT)
 1558                 rt_tx_delay_intr(sc);
 1559         if (status & RT5350_INT_RXQ1_DONE)
 1560                 rt_rx_intr(sc, 1);      
 1561         if (status & RT5350_INT_RXQ0_DONE)
 1562                 rt_rx_intr(sc, 0);      
 1563         if (status & RT5350_INT_TXQ3_DONE)
 1564                 rt_tx_intr(sc, 3);
 1565         if (status & RT5350_INT_TXQ2_DONE)
 1566                 rt_tx_intr(sc, 2);
 1567         if (status & RT5350_INT_TXQ1_DONE)
 1568                 rt_tx_intr(sc, 1);
 1569         if (status & RT5350_INT_TXQ0_DONE)
 1570                 rt_tx_intr(sc, 0);
 1571 } 
 1572 
 1573 static void
 1574 rt_tx_coherent_intr(struct rt_softc *sc)
 1575 {
 1576         uint32_t tmp;
 1577         int i;
 1578 
 1579         RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");
 1580 
 1581         sc->tx_coherent_interrupts++;
 1582 
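              /*
               * A Tx coherency error apparently means the DMA engine and the
               * driver no longer agree on descriptor state.  Recover by
               * quiescing the Tx DMA engine, resetting every Tx ring,
               * reprogramming the ring base/size/index registers, and then
               * re-enabling DMA below.
               */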
 1583         /* restart DMA engine */
 1584         tmp = RT_READ(sc, sc->pdma_glo_cfg);
 1585         tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
 1586         RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
 1587 
 1588         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
 1589                 rt_reset_tx_ring(sc, &sc->tx_ring[i]);
 1590 
 1591         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
 1592                 RT_WRITE(sc, sc->tx_base_ptr[i],
 1593                         sc->tx_ring[i].desc_phys_addr);
 1594                 RT_WRITE(sc, sc->tx_max_cnt[i],
 1595                         RT_SOFTC_TX_RING_DESC_COUNT);
 1596                 RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
 1597         }
 1598 
 1599         rt_txrx_enable(sc);
 1600 }
 1601 
 1602 /*
 1603  * rt_rx_coherent_intr
 1604  */
 1605 static void
 1606 rt_rx_coherent_intr(struct rt_softc *sc)
 1607 {
 1608         uint32_t tmp;
 1609         int i;
 1610 
 1611         RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");
 1612 
 1613         sc->rx_coherent_interrupts++;
 1614 
 1615         /* restart DMA engine */
 1616         tmp = RT_READ(sc, sc->pdma_glo_cfg);
 1617         tmp &= ~(FE_RX_DMA_EN);
 1618         RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
 1619 
 1620         /* init Rx ring */
 1621         for (i = 0; i < sc->rx_ring_count; i++)
 1622                 rt_reset_rx_ring(sc, &sc->rx_ring[i]);
 1623 
 1624         for (i = 0; i < sc->rx_ring_count; i++) {
 1625                 RT_WRITE(sc, sc->rx_base_ptr[i],
 1626                         sc->rx_ring[i].desc_phys_addr);
 1627                 RT_WRITE(sc, sc->rx_max_cnt[i],
 1628                         RT_SOFTC_RX_RING_DATA_COUNT);
 1629                 RT_WRITE(sc, sc->rx_calc_idx[i],
 1630                         RT_SOFTC_RX_RING_DATA_COUNT - 1);
 1631         }
 1632 
 1633         rt_txrx_enable(sc);
 1634 }
 1635 
 1636 /*
 1637  * rt_rx_intr - a packet has been received
 1638  */
 1639 static void
 1640 rt_rx_intr(struct rt_softc *sc, int qid)
 1641 {
 1642         KASSERT(qid >= 0 && qid < sc->rx_ring_count,
 1643                 ("%s: Rx interrupt: invalid qid=%d\n",
 1644                  device_get_nameunit(sc->dev), qid));
 1645 
 1646         RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
 1647         sc->rx_interrupts[qid]++;
 1648         RT_SOFTC_LOCK(sc);
 1649 
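              /*
               * Defer the actual ring processing to the taskqueue: mask
               * further Rx done interrupts for this queue (unless already
               * masked) and record the queue in intr_pending_mask so that
               * rt_rx_done_task knows what to service.  The task re-enables
               * the interrupt once the ring has been drained.
               */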
 1650         if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) {
 1651                 rt_intr_disable(sc, (sc->int_rx_done_mask << qid));
 1652                 taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
 1653         }
 1654 
 1655         sc->intr_pending_mask |= (sc->int_rx_done_mask << qid);
 1656         RT_SOFTC_UNLOCK(sc);
 1657 }
 1658 
 1659 static void
 1660 rt_rx_delay_intr(struct rt_softc *sc)
 1661 {
 1662 
 1663         RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
 1664         sc->rx_delay_interrupts++;
 1665 }
 1666 
 1667 static void
 1668 rt_tx_delay_intr(struct rt_softc *sc)
 1669 {
 1670 
 1671         RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
 1672         sc->tx_delay_interrupts++;
 1673 }
 1674 
 1675 /*
 1676  * rt_tx_intr - transmission of a packet completed
 1677  */
 1678 static void
 1679 rt_tx_intr(struct rt_softc *sc, int qid)
 1680 {
 1681 
 1682         KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
 1683                 ("%s: Tx interrupt: invalid qid=%d\n",
 1684                  device_get_nameunit(sc->dev), qid));
 1685 
 1686         RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);
 1687 
 1688         sc->tx_interrupts[qid]++;
 1689         RT_SOFTC_LOCK(sc);
 1690 
 1691         if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) {
 1692                 rt_intr_disable(sc, (sc->int_tx_done_mask << qid));
 1693                 taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
 1694         }
 1695 
 1696         sc->intr_pending_mask |= (sc->int_tx_done_mask << qid);
 1697         RT_SOFTC_UNLOCK(sc);
 1698 }
 1699 
 1700 /*
 1701  * rt_rx_done_task - deferred handler for Rx done interrupts
 1702  */
 1703 static void
 1704 rt_rx_done_task(void *context, int pending)
 1705 {
 1706         struct rt_softc *sc;
 1707         struct ifnet *ifp;
 1708         int again;
 1709 
 1710         sc = context;
 1711         ifp = sc->ifp;
 1712 
 1713         RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");
 1714 
 1715         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1716                 return;
 1717 
 1718         sc->intr_pending_mask &= ~sc->int_rx_done_mask;
 1719 
 1720         again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);
 1721 
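              /*
               * rt_rx_eof() returns non-zero when it stopped because the
               * rx_process_limit budget ran out.  In that case (or if another
               * Rx interrupt arrived meanwhile) the task reschedules itself
               * instead of re-enabling the interrupt, which bounds the amount
               * of work done per interrupt.
               */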
 1722         RT_SOFTC_LOCK(sc);
 1723 
 1724         if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) {
 1725                 RT_DPRINTF(sc, RT_DEBUG_RX,
 1726                     "Rx done task: scheduling again\n");
 1727                 taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
 1728         } else {
 1729                 rt_intr_enable(sc, sc->int_rx_done_mask);
 1730         }
 1731 
 1732         RT_SOFTC_UNLOCK(sc);
 1733 }
 1734 
 1735 /*
 1736  * rt_tx_done_task - process completed transmissions on all Tx queues
 1737  */
 1738 static void
 1739 rt_tx_done_task(void *context, int pending)
 1740 {
 1741         struct rt_softc *sc;
 1742         struct ifnet *ifp;
 1743         uint32_t intr_mask;
 1744         int i;
 1745 
 1746         sc = context;
 1747         ifp = sc->ifp;
 1748 
 1749         RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");
 1750 
 1751         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1752                 return;
 1753 
 1754         for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
 1755                 if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) {
 1756                         sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i);
 1757                         rt_tx_eof(sc, &sc->tx_ring[i]);
 1758                 }
 1759         }
 1760 
 1761         sc->tx_timer = 0;
 1762 
 1763         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1764 
 1765         if (sc->rt_chipid == RT_CHIPID_RT5350 ||
 1766             sc->rt_chipid == RT_CHIPID_MT7620 ||
 1767             sc->rt_chipid == RT_CHIPID_MT7621)
 1768                 intr_mask = (
 1769                     RT5350_INT_TXQ3_DONE |
 1770                     RT5350_INT_TXQ2_DONE |
 1771                     RT5350_INT_TXQ1_DONE |
 1772                     RT5350_INT_TXQ0_DONE);
 1773         else
 1774                 intr_mask = (
 1775                     INT_TXQ3_DONE |
 1776                     INT_TXQ2_DONE |
 1777                     INT_TXQ1_DONE |
 1778                     INT_TXQ0_DONE);
 1779 
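              /*
               * Re-enable only those Tx done interrupts that are currently
               * masked and have no work pending; queues still flagged in
               * intr_pending_mask are handled by rescheduling the task below.
               */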
 1780         RT_SOFTC_LOCK(sc);
 1781 
 1782         rt_intr_enable(sc, ~sc->intr_pending_mask &
 1783             (sc->intr_disable_mask & intr_mask));
 1784 
 1785         if (sc->intr_pending_mask & intr_mask) {
 1786                 RT_DPRINTF(sc, RT_DEBUG_TX,
 1787                     "Tx done task: scheduling again\n");
 1788                 taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
 1789         }
 1790 
 1791         RT_SOFTC_UNLOCK(sc);
 1792 
 1793         if (!IFQ_IS_EMPTY(&ifp->if_snd))
 1794                 rt_start(ifp);
 1795 }
 1796 
 1797 /*
 1798  * rt_periodic_task - periodic housekeeping (statistics and watchdog)
 1799  */
 1800 static void
 1801 rt_periodic_task(void *context, int pending)
 1802 {
 1803         struct rt_softc *sc;
 1804         struct ifnet *ifp;
 1805 
 1806         sc = context;
 1807         ifp = sc->ifp;
 1808 
 1809         RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
 1810             sc->periodic_round);
 1811 
 1812         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1813                 return;
 1814 
 1815         RT_SOFTC_LOCK(sc);
 1816         sc->periodic_round++;
 1817         rt_update_stats(sc);
 1818 
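              /*
               * Heavier work runs only on every 10th round: with the hz / 10
               * reschedule below, raw counter harvesting and the watchdog
               * check happen roughly once per second.
               */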
 1819         if ((sc->periodic_round % 10) == 0) {
 1820                 rt_update_raw_counters(sc);
 1821                 rt_watchdog(sc);
 1822         }
 1823 
 1824         RT_SOFTC_UNLOCK(sc);
 1825         taskqueue_enqueue_timeout(sc->taskqueue, &sc->periodic_task, hz / 10);
 1826 }
 1827 
 1828 /*
 1829  * rt_rx_eof - check for frames completed by the DMA engine and pass them
 1830  * to the network subsystem.
 1831  */
 1832 static int
 1833 rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit)
 1834 {
 1835         struct ifnet *ifp;
 1836 /*      struct rt_softc_rx_ring *ring; */
 1837         struct rt_rxdesc *desc;
 1838         struct rt_softc_rx_data *data;
 1839         struct mbuf *m, *mnew;
 1840         bus_dma_segment_t segs[1];
 1841         bus_dmamap_t dma_map;
 1842         uint32_t index, desc_flags;
 1843         int error, nsegs, len, nframes;
 1844 
 1845         ifp = sc->ifp;
 1846 /*      ring = &sc->rx_ring[0]; */
 1847 
 1848         nframes = 0;
 1849 
 1850         while (limit != 0) {
 1851                 index = RT_READ(sc, sc->rx_drx_idx[0]);
 1852                 if (ring->cur == index)
 1853                         break;
 1854 
 1855                 desc = &ring->desc[ring->cur];
 1856                 data = &ring->data[ring->cur];
 1857 
 1858                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 1859                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1860 
 1861 #ifdef IF_RT_DEBUG
 1862                 if (sc->debug & RT_DEBUG_RX) {
 1863                         printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
 1864                         hexdump(desc, 16, 0, 0);
 1865                         printf("-----------------------------------\n");
 1866                 }
 1867 #endif
 1868 
 1869                 /* XXX Sometimes the device doesn't set the DDONE bit */
 1870 #ifdef DDONE_FIXED
 1871                 if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
 1872                         RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
 1873                         break;
 1874                 }
 1875 #endif
 1876 
 1877                 len = le16toh(desc->sdl0) & 0x3fff;
 1878                 RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);
 1879 
 1880                 nframes++;
 1881 
 1882                 mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
 1883                     MJUMPAGESIZE);
 1884                 if (mnew == NULL) {
 1885                         sc->rx_mbuf_alloc_errors++;
 1886                         if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 1887                         goto skip;
 1888                 }
 1889 
 1890                 mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;
 1891 
 1892                 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
 1893                     ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
 1894                 if (error != 0) {
 1895                         RT_DPRINTF(sc, RT_DEBUG_RX,
 1896                             "could not load Rx mbuf DMA map: "
 1897                             "error=%d, nsegs=%d\n",
 1898                             error, nsegs);
 1899 
 1900                         m_freem(mnew);
 1901 
 1902                         sc->rx_mbuf_dmamap_errors++;
 1903                         if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 1904 
 1905                         goto skip;
 1906                 }
 1907 
 1908                 KASSERT(nsegs == 1, ("%s: too many DMA segments",
 1909                         device_get_nameunit(sc->dev)));
 1910 
 1911                 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 1912                         BUS_DMASYNC_POSTREAD);
 1913                 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 1914 
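                      /*
                       * Swap DMA maps instead of copying data: the freshly
                       * loaded spare map now owns the replacement mbuf, while
                       * the map of the mbuf being passed up the stack becomes
                       * the new spare for the next replacement.
                       */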
 1915                 dma_map = data->dma_map;
 1916                 data->dma_map = ring->spare_dma_map;
 1917                 ring->spare_dma_map = dma_map;
 1918 
 1919                 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 1920                         BUS_DMASYNC_PREREAD);
 1921 
 1922                 m = data->m;
 1923                 desc_flags = desc->word3;
 1924 
 1925                 data->m = mnew;
 1926                 /* Add 2 for proper alignment of the RX IP header */
 1927                 desc->sdp0 = htole32(segs[0].ds_addr+2);
 1928                 desc->sdl0 = htole32(segs[0].ds_len-2);
 1929                 desc->word3 = 0;
 1930 
 1931                 RT_DPRINTF(sc, RT_DEBUG_RX,
 1932                     "Rx frame: rxdesc flags=0x%08x\n", desc_flags);
 1933 
 1934                 m->m_pkthdr.rcvif = ifp;
 1935                 /* Advance by 2 to match the "sdp0 = addr + 2" offset above */
 1936                 m->m_data += 2;
 1937                 m->m_pkthdr.len = m->m_len = len;
 1938 
 1939                 /* check for checksum errors */
 1940                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
 1941                         /* check for a valid checksum */
 1942                         if (desc_flags & (sc->csum_fail_ip|sc->csum_fail_l4)) {
 1943                                 RT_DPRINTF(sc, RT_DEBUG_RX,
 1944                                     "rxdesc: crc error\n");
 1945 
 1946                                 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 1947 
 1948                                 if (!(ifp->if_flags & IFF_PROMISC)) {
 1949                                     m_freem(m);
 1950                                     goto skip;
 1951                                 }
 1952                         }
 1953                         if ((desc_flags & sc->csum_fail_ip) == 0) {
 1954                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1955                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1956                                 m->m_pkthdr.csum_data = 0xffff;
 1957                         }
 1958                         m->m_flags &= ~M_HASFCS;
 1959                 }
 1960 
 1961                 (*ifp->if_input)(ifp, m);
 1962 skip:
 1963                 desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
 1964 
 1965                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 1966                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1967 
 1968                 ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;
 1969 
 1970                 limit--;
 1971         }
 1972 
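              /*
               * Return the processed descriptors to the DMA engine:
               * rx_calc_idx points at the last descriptor the CPU has
               * finished with, i.e. one behind ring->cur, with the wraparound
               * handled explicitly.
               */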
 1973         if (ring->cur == 0)
 1974                 RT_WRITE(sc, sc->rx_calc_idx[0],
 1975                         RT_SOFTC_RX_RING_DATA_COUNT - 1);
 1976         else
 1977                 RT_WRITE(sc, sc->rx_calc_idx[0],
 1978                         ring->cur - 1);
 1979 
 1980         RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);
 1981 
 1982         sc->rx_packets += nframes;
 1983 
 1984         return (limit == 0);
 1985 }
 1986 
 1987 /*
 1988  * rt_tx_eof - check for successfully transmitted frames and mark their
 1989  * descriptors as free.
 1990  */
 1991 static void
 1992 rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
 1993 {
 1994         struct ifnet *ifp;
 1995         struct rt_txdesc *desc;
 1996         struct rt_softc_tx_data *data;
 1997         uint32_t index;
 1998         int ndescs, nframes;
 1999 
 2000         ifp = sc->ifp;
 2001 
 2002         ndescs = 0;
 2003         nframes = 0;
 2004 
 2005         for (;;) {
 2006                 index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]);
 2007                 if (ring->desc_next == index)
 2008                         break;
 2009 
 2010                 ndescs++;
 2011 
 2012                 desc = &ring->desc[ring->desc_next];
 2013 
 2014                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2015                         BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 2016 
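                      /*
                       * Only a descriptor marked as the last segment of a
                       * frame owns an mbuf; descriptors for intermediate
                       * segments are recycled below without touching
                       * ring->data.
                       */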
 2017                 if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
 2018                         desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
 2019                         nframes++;
 2020 
 2021                         data = &ring->data[ring->data_next];
 2022 
 2023                         bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 2024                                 BUS_DMASYNC_POSTWRITE);
 2025                         bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 2026 
 2027                         m_freem(data->m);
 2028 
 2029                         data->m = NULL;
 2030 
 2031                         if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
 2032 
 2033                         RT_SOFTC_TX_RING_LOCK(ring);
 2034                         ring->data_queued--;
 2035                         ring->data_next = (ring->data_next + 1) %
 2036                             RT_SOFTC_TX_RING_DATA_COUNT;
 2037                         RT_SOFTC_TX_RING_UNLOCK(ring);
 2038                 }
 2039 
 2040                 desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);
 2041 
 2042                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2043                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2044 
 2045                 RT_SOFTC_TX_RING_LOCK(ring);
 2046                 ring->desc_queued--;
 2047                 ring->desc_next = (ring->desc_next + 1) %
 2048                     RT_SOFTC_TX_RING_DESC_COUNT;
 2049                 RT_SOFTC_TX_RING_UNLOCK(ring);
 2050         }
 2051 
 2052         RT_DPRINTF(sc, RT_DEBUG_TX,
 2053             "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
 2054             nframes);
 2055 }
 2056 
 2057 /*
 2058  * rt_update_stats - query statistics counters and update related variables.
 2059  */
 2060 static void
 2061 rt_update_stats(struct rt_softc *sc)
 2062 {
 2063         struct ifnet *ifp;
 2064 
 2065         ifp = sc->ifp;
 2066         RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistics\n");
 2067         /* XXX do update stats here */
 2068 }
 2069 
 2070 /*
 2071  * rt_watchdog - watchdog handler; the device reset is currently disabled.
 2072  */
 2073 static void
 2074 rt_watchdog(struct rt_softc *sc)
 2075 {
 2076         uint32_t tmp;
 2077 #ifdef notyet
 2078         int ntries;
 2079 #endif
 2080         if (sc->rt_chipid != RT_CHIPID_RT5350 &&
 2081            sc->rt_chipid != RT_CHIPID_MT7620 &&
 2082            sc->rt_chipid != RT_CHIPID_MT7621) {
 2083                 tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);
 2084 
 2085                 RT_DPRINTF(sc, RT_DEBUG_WATCHDOG,
 2086                            "watchdog: CDMA_OQ_STA=0x%08x\n", tmp);
 2087         }
 2088         /* XXX: do not reset */
 2089 #ifdef notyet
 2090         if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
 2091                 sc->tx_queue_not_empty[0]++;
 2092 
 2093                 for (ntries = 0; ntries < 10; ntries++) {
 2094                         tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
 2095                         if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
 2096                                 break;
 2097 
 2098                         DELAY(1);
 2099                 }
 2100         }
 2101 
 2102         if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
 2103                 sc->tx_queue_not_empty[1]++;
 2104 
 2105                 for (ntries = 0; ntries < 10; ntries++) {
 2106                         tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
 2107                         if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
 2108                                 break;
 2109 
 2110                         DELAY(1);
 2111                 }
 2112         }
 2113 #endif
 2114 }
 2115 
 2116 /*
 2117  * rt_update_raw_counters - accumulate the raw GDMA port counters.
 2118  */
 2119 static void
 2120 rt_update_raw_counters(struct rt_softc *sc)
 2121 {
 2122 
 2123         sc->tx_bytes    += RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
 2124         sc->tx_packets  += RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
 2125         sc->tx_skip     += RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
 2126         sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
 2127 
 2128         sc->rx_bytes    += RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
 2129         sc->rx_packets  += RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
 2130         sc->rx_crc_err  += RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
 2131         sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
 2132         sc->rx_long_err += RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
 2133         sc->rx_phy_err  += RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
 2134         sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
 2135 }
 2136 
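/*
 * rt_intr_enable/rt_intr_disable - keep a software shadow of the masked
 * interrupt sources in intr_disable_mask; the hardware enable register is
 * always rewritten as intr_enable_mask & ~intr_disable_mask, so enabling
 * one source never accidentally unmasks another.
 */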
 2137 static void
 2138 rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
 2139 {
 2140         uint32_t tmp;
 2141 
 2142         sc->intr_disable_mask &= ~intr_mask;
 2143         tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
 2144         RT_WRITE(sc, sc->fe_int_enable, tmp);
 2145 }
 2146 
 2147 static void
 2148 rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
 2149 {
 2150         uint32_t tmp;
 2151 
 2152         sc->intr_disable_mask |= intr_mask;
 2153         tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
 2154         RT_WRITE(sc, sc->fe_int_enable, tmp);
 2155 }
 2156 
 2157 /*
 2158  * rt_txrx_enable - enable TX/RX DMA
 2159  */
 2160 static int
 2161 rt_txrx_enable(struct rt_softc *sc)
 2162 {
 2163         struct ifnet *ifp;
 2164         uint32_t tmp;
 2165         int ntries;
 2166 
 2167         ifp = sc->ifp;
 2168 
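              /*
               * Poll the global config register for up to ~200 ms
               * (200 iterations x 1 ms) so any in-flight DMA drains before
               * the enable bits are changed.
               */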
 2169         /* enable Tx/Rx DMA engine */
 2170         for (ntries = 0; ntries < 200; ntries++) {
 2171                 tmp = RT_READ(sc, sc->pdma_glo_cfg);
 2172                 if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
 2173                         break;
 2174 
 2175                 DELAY(1000);
 2176         }
 2177 
 2178         if (ntries == 200) {
 2179                 device_printf(sc->dev, "timeout waiting for DMA engine\n");
 2180                 return (-1);
 2181         }
 2182 
 2183         DELAY(50);
 2184 
 2185         tmp |= FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
 2186         RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
 2187 
 2188         /* XXX set Rx filter */
 2189         return (0);
 2190 }
 2191 
 2192 /*
 2193  * rt_alloc_rx_ring - allocate RX DMA ring buffer
 2194  */
 2195 static int
 2196 rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid)
 2197 {
 2198         struct rt_rxdesc *desc;
 2199         struct rt_softc_rx_data *data;
 2200         bus_dma_segment_t segs[1];
 2201         int i, nsegs, error;
 2202 
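              /*
               * Standard busdma three-step for the descriptor ring: create a
               * tag describing one physically contiguous allocation, allocate
               * the DMA-able memory, then load the map to learn the bus
               * address (delivered through the rt_dma_map_addr() callback).
               */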
 2203         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
 2204                 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2205                 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
 2206                 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
 2207                 0, NULL, NULL, &ring->desc_dma_tag);
 2208         if (error != 0) {
 2209                 device_printf(sc->dev,
 2210                     "could not create Rx desc DMA tag\n");
 2211                 goto fail;
 2212         }
 2213 
 2214         error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
 2215             BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
 2216         if (error != 0) {
 2217                 device_printf(sc->dev,
 2218                     "could not allocate Rx desc DMA memory\n");
 2219                 goto fail;
 2220         }
 2221 
 2222         error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
 2223                 ring->desc,
 2224                 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
 2225                 rt_dma_map_addr, &ring->desc_phys_addr, 0);
 2226         if (error != 0) {
 2227                 device_printf(sc->dev, "could not load Rx desc DMA map\n");
 2228                 goto fail;
 2229         }
 2230 
 2231         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
 2232             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2233                 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
 2234                 &ring->data_dma_tag);
 2235         if (error != 0) {
 2236                 device_printf(sc->dev,
 2237                     "could not create Rx data DMA tag\n");
 2238                 goto fail;
 2239         }
 2240 
 2241         for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
 2242                 desc = &ring->desc[i];
 2243                 data = &ring->data[i];
 2244 
 2245                 error = bus_dmamap_create(ring->data_dma_tag, 0,
 2246                     &data->dma_map);
 2247                 if (error != 0) {
 2248                         device_printf(sc->dev, "could not create Rx data DMA "
 2249                             "map\n");
 2250                         goto fail;
 2251                 }
 2252 
 2253                 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
 2254                     MJUMPAGESIZE);
 2255                 if (data->m == NULL) {
 2256                         device_printf(sc->dev, "could not allocate Rx mbuf\n");
 2257                         error = ENOMEM;
 2258                         goto fail;
 2259                 }
 2260 
 2261                 data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
 2262 
 2263                 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
 2264                     data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
 2265                 if (error != 0) {
 2266                         device_printf(sc->dev,
 2267                             "could not load Rx mbuf DMA map\n");
 2268                         goto fail;
 2269                 }
 2270 
 2271                 KASSERT(nsegs == 1, ("%s: too many DMA segments",
 2272                         device_get_nameunit(sc->dev)));
 2273 
 2274                 /* Add 2 for proper align of RX IP header */
 2275                 desc->sdp0 = htole32(segs[0].ds_addr+2);
 2276                 desc->sdl0 = htole32(segs[0].ds_len-2);
 2277         }
 2278 
 2279         error = bus_dmamap_create(ring->data_dma_tag, 0,
 2280             &ring->spare_dma_map);
 2281         if (error != 0) {
 2282                 device_printf(sc->dev,
 2283                     "could not create Rx spare DMA map\n");
 2284                 goto fail;
 2285         }
 2286 
 2287         bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2288                 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2289         ring->qid = qid;
 2290         return (0);
 2291 
 2292 fail:
 2293         rt_free_rx_ring(sc, ring);
 2294         return (error);
 2295 }
 2296 
 2297 /*
 2298  * rt_reset_rx_ring - reset RX ring buffer
 2299  */
 2300 static void
 2301 rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
 2302 {
 2303         struct rt_rxdesc *desc;
 2304         int i;
 2305 
 2306         for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
 2307                 desc = &ring->desc[i];
 2308                 desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
 2309         }
 2310 
 2311         bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2312                 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2313         ring->cur = 0;
 2314 }
 2315 
 2316 /*
 2317  * rt_free_rx_ring - free memory used by RX ring buffer
 2318  */
 2319 static void
 2320 rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
 2321 {
 2322         struct rt_softc_rx_data *data;
 2323         int i;
 2324 
 2325         if (ring->desc != NULL) {
 2326                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2327                         BUS_DMASYNC_POSTWRITE);
 2328                 bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
 2329                 bus_dmamem_free(ring->desc_dma_tag, ring->desc,
 2330                         ring->desc_dma_map);
 2331         }
 2332 
 2333         if (ring->desc_dma_tag != NULL)
 2334                 bus_dma_tag_destroy(ring->desc_dma_tag);
 2335 
 2336         for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
 2337                 data = &ring->data[i];
 2338 
 2339                 if (data->m != NULL) {
 2340                         bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 2341                                 BUS_DMASYNC_POSTREAD);
 2342                         bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 2343                         m_freem(data->m);
 2344                 }
 2345 
 2346                 if (data->dma_map != NULL)
 2347                         bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
 2348         }
 2349 
 2350         if (ring->spare_dma_map != NULL)
 2351                 bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
 2352 
 2353         if (ring->data_dma_tag != NULL)
 2354                 bus_dma_tag_destroy(ring->data_dma_tag);
 2355 }
 2356 
 2357 /*
 2358  * rt_alloc_tx_ring - allocate TX ring buffer
 2359  */
 2360 static int
 2361 rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
 2362 {
 2363         struct rt_softc_tx_data *data;
 2364         int error, i;
 2365 
 2366         mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
 2367 
 2368         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
 2369                 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2370                 RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
 2371                 RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
 2372                 0, NULL, NULL, &ring->desc_dma_tag);
 2373         if (error != 0) {
 2374                 device_printf(sc->dev,
 2375                     "could not create Tx desc DMA tag\n");
 2376                 goto fail;
 2377         }
 2378 
 2379         error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
 2380             BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
 2381         if (error != 0) {
 2382                 device_printf(sc->dev,
 2383                     "could not allocate Tx desc DMA memory\n");
 2384                 goto fail;
 2385         }
 2386 
 2387         error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
 2388             ring->desc, (RT_SOFTC_TX_RING_DESC_COUNT *
 2389             sizeof(struct rt_txdesc)), rt_dma_map_addr,
 2390             &ring->desc_phys_addr, 0);
 2391         if (error != 0) {
 2392                 device_printf(sc->dev, "could not load Tx desc DMA map\n");
 2393                 goto fail;
 2394         }
 2395 
 2396         ring->desc_queued = 0;
 2397         ring->desc_cur = 0;
 2398         ring->desc_next = 0;
 2399 
 2400         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
 2401             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2402             RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
 2403             RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
 2404             0, NULL, NULL, &ring->seg0_dma_tag);
 2405         if (error != 0) {
 2406                 device_printf(sc->dev,
 2407                     "could not create Tx seg0 DMA tag\n");
 2408                 goto fail;
 2409         }
 2410 
 2411         error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
 2412             BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
 2413         if (error != 0) {
 2414                 device_printf(sc->dev,
 2415                     "could not allocate Tx seg0 DMA memory\n");
 2416                 goto fail;
 2417         }
 2418 
 2419         error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
 2420             ring->seg0,
 2421             RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
 2422             rt_dma_map_addr, &ring->seg0_phys_addr, 0);
 2423         if (error != 0) {
 2424                 device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
 2425                 goto fail;
 2426         }
 2427 
 2428         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
 2429             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2430             MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
 2431             &ring->data_dma_tag);
 2432         if (error != 0) {
 2433                 device_printf(sc->dev,
 2434                     "could not create Tx data DMA tag\n");
 2435                 goto fail;
 2436         }
 2437 
 2438         for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
 2439                 data = &ring->data[i];
 2440 
 2441                 error = bus_dmamap_create(ring->data_dma_tag, 0,
 2442                     &data->dma_map);
 2443                 if (error != 0) {
 2444                         device_printf(sc->dev, "could not create Tx data DMA "
 2445                             "map\n");
 2446                         goto fail;
 2447                 }
 2448         }
 2449 
 2450         ring->data_queued = 0;
 2451         ring->data_cur = 0;
 2452         ring->data_next = 0;
 2453 
 2454         ring->qid = qid;
 2455         return (0);
 2456 
 2457 fail:
 2458         rt_free_tx_ring(sc, ring);
 2459         return (error);
 2460 }
 2461 
 2462 /*
 2463  * rt_reset_tx_ring - reset TX ring buffer to empty state
 2464  */
 2465 static void
 2466 rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
 2467 {
 2468         struct rt_softc_tx_data *data;
 2469         struct rt_txdesc *desc;
 2470         int i;
 2471 
 2472         for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
 2473                 desc = &ring->desc[i];
 2474 
 2475                 desc->sdl0 = 0;
 2476                 desc->sdl1 = 0;
 2477         }
 2478 
 2479         ring->desc_queued = 0;
 2480         ring->desc_cur = 0;
 2481         ring->desc_next = 0;
 2482 
 2483         bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2484                 BUS_DMASYNC_PREWRITE);
 2485 
 2486         bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
 2487                 BUS_DMASYNC_PREWRITE);
 2488 
 2489         for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
 2490                 data = &ring->data[i];
 2491 
 2492                 if (data->m != NULL) {
 2493                         bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 2494                                 BUS_DMASYNC_POSTWRITE);
 2495                         bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 2496                         m_freem(data->m);
 2497                         data->m = NULL;
 2498                 }
 2499         }
 2500 
 2501         ring->data_queued = 0;
 2502         ring->data_cur = 0;
 2503         ring->data_next = 0;
 2504 }
 2505 
 2506 /*
 2507  * rt_free_tx_ring - free memory used by TX ring buffer
 2508  */
 2509 static void
 2510 rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
 2511 {
 2512         struct rt_softc_tx_data *data;
 2513         int i;
 2514 
 2515         if (ring->desc != NULL) {
 2516                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2517                         BUS_DMASYNC_POSTWRITE);
 2518                 bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
 2519                 bus_dmamem_free(ring->desc_dma_tag, ring->desc,
 2520                         ring->desc_dma_map);
 2521         }
 2522 
 2523         if (ring->desc_dma_tag != NULL)
 2524                 bus_dma_tag_destroy(ring->desc_dma_tag);
 2525 
 2526         if (ring->seg0 != NULL) {
 2527                 bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
 2528                         BUS_DMASYNC_POSTWRITE);
 2529                 bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
 2530                 bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
 2531                         ring->seg0_dma_map);
 2532         }
 2533 
 2534         if (ring->seg0_dma_tag != NULL)
 2535                 bus_dma_tag_destroy(ring->seg0_dma_tag);
 2536 
 2537         for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
 2538                 data = &ring->data[i];
 2539 
 2540                 if (data->m != NULL) {
 2541                         bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 2542                                 BUS_DMASYNC_POSTWRITE);
 2543                         bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 2544                         m_freem(data->m);
 2545                 }
 2546 
 2547                 if (data->dma_map != NULL)
 2548                         bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
 2549         }
 2550 
 2551         if (ring->data_dma_tag != NULL)
 2552                 bus_dma_tag_destroy(ring->data_dma_tag);
 2553 
 2554         mtx_destroy(&ring->lock);
 2555 }
 2556 
 2557 /*
 2558  * rt_dma_map_addr - get address of busdma segment
 2559  */
 2560 static void
 2561 rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 2562 {
 2563         if (error != 0)
 2564                 return;
 2565 
 2566         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
 2567 
 2568         *(bus_addr_t *) arg = segs[0].ds_addr;
 2569 }
 2570 
 2571 /*
 2572  * rt_sysctl_attach - attach sysctl nodes for NIC counters.
 2573  */
 2574 static void
 2575 rt_sysctl_attach(struct rt_softc *sc)
 2576 {
 2577         struct sysctl_ctx_list *ctx;
 2578         struct sysctl_oid *tree;
 2579         struct sysctl_oid *stats;
 2580 
 2581         ctx = device_get_sysctl_ctx(sc->dev);
 2582         tree = device_get_sysctl_tree(sc->dev);
 2583 
 2584         /* statistics counters */
 2585         stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2586             "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "statistics");
 2587 
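              /*
               * All of the counters below appear read-only under the device's
               * sysctl tree; for unit 0 they can be inspected with, e.g.:
               *
               *   sysctl dev.rt.0.stats.interrupts
               */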
 2588         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2589             "interrupts", CTLFLAG_RD, &sc->interrupts,
 2590             "all interrupts");
 2591 
 2592         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2593             "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
 2594             "Tx coherent interrupts");
 2595 
 2596         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2597             "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
 2598             "Rx coherent interrupts");
 2599 
 2600         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2601             "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0],
 2602             "Rx interrupts");
 2603 
 2604         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2605             "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts,
 2606             "Rx delay interrupts");
 2607 
 2608         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2609             "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3],
 2610             "Tx AC3 interrupts");
 2611 
 2612         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2613             "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2],
 2614             "Tx AC2 interrupts");
 2615 
 2616         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2617             "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1],
 2618             "Tx AC1 interrupts");
 2619 
 2620         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2621             "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0],
 2622             "Tx AC0 interrupts");
 2623 
 2624         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2625             "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
 2626             "Tx delay interrupts");
 2627 
 2628         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2629             "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
 2630             0, "Tx AC3 descriptors queued");
 2631 
 2632         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2633             "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
 2634             0, "Tx AC3 data queued");
 2635 
 2636         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2637             "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
 2638             0, "Tx AC2 descriptors queued");
 2639 
 2640         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2641             "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
 2642             0, "Tx AC2 data queued");
 2643 
 2644         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2645             "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
 2646             0, "Tx AC1 descriptors queued");
 2647 
 2648         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2649             "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
 2650             0, "Tx AC1 data queued");
 2651 
 2652         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2653             "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
 2654             0, "Tx AC0 descriptors queued");
 2655 
 2656         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2657             "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
 2658             0, "Tx AC0 data queued");
 2659 
 2660         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2661             "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
 2662             "Tx AC3 data queue full");
 2663 
 2664         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2665             "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
 2666             "Tx AC2 data queue full");
 2667 
 2668         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2669             "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
 2670             "Tx AC1 data queue full");
 2671 
 2672         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2673             "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
 2674             "Tx AC0 data queue full");
 2675 
 2676         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2677             "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
 2678             "Tx watchdog timeouts");
 2679 
 2680         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2681             "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets,
 2682             "Tx defragmented packets");
 2683 
 2684         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2685             "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail,
 2686             "no Tx descriptors available");
 2687 
 2688         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2689             "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
 2690             "Rx mbuf allocation errors");
 2691 
 2692         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2693             "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
 2694             "Rx mbuf DMA mapping errors");
 2695 
 2696         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2697             "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
 2698             "Tx queue 0 not empty");
 2699 
 2700         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2701             "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
 2702             "Tx queue 1 not empty");
 2703 
 2704         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2705             "rx_packets", CTLFLAG_RD, &sc->rx_packets,
 2706             "Rx packets");
 2707 
 2708         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2709             "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err,
 2710             "Rx CRC errors");
 2711 
 2712         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2713             "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err,
 2714             "Rx PHY errors");
 2715 
 2716         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2717             "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets,
 2718             "Rx duplicate packets");
 2719 
 2720         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2721             "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows,
 2722             "Rx FIFO overflows");
 2723 
 2724         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2725             "rx_bytes", CTLFLAG_RD, &sc->rx_bytes,
 2726             "Rx bytes");
 2727 
 2728         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2729             "rx_long_err", CTLFLAG_RD, &sc->rx_long_err,
 2730             "Rx too long frame errors");
 2731 
 2732         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2733             "rx_short_err", CTLFLAG_RD, &sc->rx_short_err,
 2734             "Rx too short frame errors");
 2735 
 2736         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2737             "tx_bytes", CTLFLAG_RD, &sc->tx_bytes,
 2738             "Tx bytes");
 2739 
 2740         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2741             "tx_packets", CTLFLAG_RD, &sc->tx_packets,
 2742             "Tx packets");
 2743 
 2744         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2745             "tx_skip", CTLFLAG_RD, &sc->tx_skip,
 2746             "Tx skip count for GDMA ports");
 2747 
 2748         SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2749             "tx_collision", CTLFLAG_RD, &sc->tx_collision,
 2750             "Tx collision count for GDMA ports");
 2751 }
 2752 
 2753 #if defined(IF_RT_PHY_SUPPORT) || defined(RT_MDIO)
 2754 /* This code only works on the RT2880 and similar chips. */
 2755 /* TODO: add support for the RT3052 and later. But does anybody need it? */
 2756 static int
 2757 rt_miibus_readreg(device_t dev, int phy, int reg)
 2758 {
 2759         struct rt_softc *sc = device_get_softc(dev);
 2760         int dat;
 2761 
 2762         /*
 2763          * PSEUDO_PHYAD is a special value indicating that a switch is
 2764          * attached. No PHY uses the PSEUDO_PHYAD (0x1e) address.
 2765          */
 2766 #ifndef RT_MDIO
 2767         if (phy == 31) {
 2768                 /* Fake PHY ID for bfeswitch attach */
 2769                 switch (reg) {
 2770                 case MII_BMSR:
 2771                         return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
 2772                 case MII_PHYIDR1:
 2773                         return (0x40);          /* As a result of the faking, */
 2774                 case MII_PHYIDR2:               /* the PHY is detected as a */
 2775                         return (0x6250);        /* bfeswitch. */
 2776                 }
 2777         }
 2778 #endif
 2779 
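              /*
               * MDIO_ACCESS carries the PHY and register addresses in
               * dedicated bit fields.  Writing the word first without and
               * then with MDIO_CMD_ONGO appears to be what starts the cycle;
               * the controller clears MDIO_CMD_ONGO when the cycle completes.
               */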
 2780         /* Wait for any previous command to complete */
 2781         while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
 2782         dat = ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
 2783             ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK);
 2784         RT_WRITE(sc, MDIO_ACCESS, dat);
 2785         RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO);
 2786         while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
 2787 
 2788         return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
 2789 }
 2790 
 2791 static int
 2792 rt_miibus_writereg(device_t dev, int phy, int reg, int val)
 2793 {
 2794         struct rt_softc *sc = device_get_softc(dev);
 2795         int dat;
 2796 
 2797         /* Wait for any previous command to complete */
 2798         while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
 2799         dat = MDIO_CMD_WR |
 2800             ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
 2801             ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
 2802             (val & MDIO_PHY_DATA_MASK);
 2803         RT_WRITE(sc, MDIO_ACCESS, dat);
 2804         RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO);
 2805         while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
 2806 
 2807         return (0);
 2808 }
 2809 #endif
 2810 
 2811 #ifdef IF_RT_PHY_SUPPORT
 2812 void
 2813 rt_miibus_statchg(device_t dev)
 2814 {
 2815         struct rt_softc *sc = device_get_softc(dev);
 2816         struct mii_data *mii;
 2817 
 2818         mii = device_get_softc(sc->rt_miibus);
 2819 
 2820         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
 2821             (IFM_ACTIVE | IFM_AVALID)) {
 2822                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
 2823                 case IFM_10_T:
 2824                 case IFM_100_TX:
 2825                         /* XXX check link here */
 2826                         sc->flags |= 1;
 2827                         break;
 2828                 default:
 2829                         break;
 2830                 }
 2831         }
 2832 }
 2833 #endif /* IF_RT_PHY_SUPPORT */
 2834 
 2835 static device_method_t rt_dev_methods[] =
 2836 {
 2837         DEVMETHOD(device_probe, rt_probe),
 2838         DEVMETHOD(device_attach, rt_attach),
 2839         DEVMETHOD(device_detach, rt_detach),
 2840         DEVMETHOD(device_shutdown, rt_shutdown),
 2841         DEVMETHOD(device_suspend, rt_suspend),
 2842         DEVMETHOD(device_resume, rt_resume),
 2843 
 2844 #ifdef IF_RT_PHY_SUPPORT
 2845         /* MII interface */
 2846         DEVMETHOD(miibus_readreg,       rt_miibus_readreg),
 2847         DEVMETHOD(miibus_writereg,      rt_miibus_writereg),
 2848         DEVMETHOD(miibus_statchg,       rt_miibus_statchg),
 2849 #endif
 2850 
 2851         DEVMETHOD_END
 2852 };
 2853 
 2854 static driver_t rt_driver =
 2855 {
 2856         "rt",
 2857         rt_dev_methods,
 2858         sizeof(struct rt_softc)
 2859 };
 2860 
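/*
 * The same rt_driver is registered twice: it attaches to nexus on hinted
 * (non-FDT) configurations and to simplebus when the kernel uses FDT.
 */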
 2861 DRIVER_MODULE(rt, nexus, rt_driver, 0, 0);
 2862 #ifdef FDT
 2863 DRIVER_MODULE(rt, simplebus, rt_driver, 0, 0);
 2864 #endif
 2865 
 2866 MODULE_DEPEND(rt, ether, 1, 1, 1);
 2867 MODULE_DEPEND(rt, miibus, 1, 1, 1);
 2868 
 2869 #ifdef RT_MDIO       
 2870 MODULE_DEPEND(rt, mdio, 1, 1, 1);
 2871 
 2872 static int rtmdio_probe(device_t);
 2873 static int rtmdio_attach(device_t);
 2874 static int rtmdio_detach(device_t);
 2875 
 2876 static struct mtx miibus_mtx;
 2877 
 2878 MTX_SYSINIT(miibus_mtx, &miibus_mtx, "rt mii lock", MTX_DEF);
 2879 
 2880 /*
 2881  * Declare an additional, separate driver for accessing the MDIO bus.
 2882  */
 2883 static device_method_t rtmdio_methods[] = {
 2884         /* Device interface */
 2885         DEVMETHOD(device_probe,         rtmdio_probe),
 2886         DEVMETHOD(device_attach,        rtmdio_attach),
 2887         DEVMETHOD(device_detach,        rtmdio_detach),
 2888 
 2889         /* bus interface */
 2890         DEVMETHOD(bus_add_child,        device_add_child_ordered),
 2891 
 2892         /* MDIO access */
 2893         DEVMETHOD(mdio_readreg,         rt_miibus_readreg),
 2894         DEVMETHOD(mdio_writereg,        rt_miibus_writereg),
 2895 };
 2896 
 2897 DEFINE_CLASS_0(rtmdio, rtmdio_driver, rtmdio_methods,
 2898     sizeof(struct rt_softc));
 2899 
 2900 DRIVER_MODULE(miiproxy, rt, miiproxy_driver, 0, 0);
 2901 DRIVER_MODULE(rtmdio, simplebus, rtmdio_driver, 0, 0);
 2902 DRIVER_MODULE(mdio, rtmdio, mdio_driver, 0, 0);
 2903 
 2904 static int
 2905 rtmdio_probe(device_t dev)
 2906 {
 2907         if (!ofw_bus_status_okay(dev))
 2908                 return (ENXIO);
 2909 
 2910         if (!ofw_bus_is_compatible(dev, "ralink,rt2880-mdio"))
 2911                 return (ENXIO);
 2912 
 2913         device_set_desc(dev, "RT built-in ethernet interface, MDIO controller");
 2914         return (0);
 2915 }
 2916 
 2917 static int
 2918 rtmdio_attach(device_t dev)
 2919 {
 2920         struct rt_softc *sc;
 2921         int     error;
 2922 
 2923         sc = device_get_softc(dev);
 2924         sc->dev = dev;
 2925         sc->mem_rid = 0;
 2926         sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
 2927             &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE);
 2928         if (sc->mem == NULL) {
 2929                 device_printf(dev, "couldn't map memory\n");
 2930                 error = ENXIO;
 2931                 goto fail;
 2932         }
 2933 
 2934         sc->bst = rman_get_bustag(sc->mem);
 2935         sc->bsh = rman_get_bushandle(sc->mem);
 2936 
 2937         bus_generic_probe(dev);
 2938         bus_enumerate_hinted_children(dev);
 2939         error = bus_generic_attach(dev);
 2940 fail:
 2941         return (error);
 2942 }
 2943 
 2944 static int
 2945 rtmdio_detach(device_t dev)
 2946 {
 2947         return (0);
 2948 }
 2949 #endif
