FreeBSD/Linux Kernel Cross Reference
sys/dev/rt/if_rt.c


    1 /*-
    2  * Copyright (c) 2011, Aleksandr Rybalko
    3  * based on hard work
    4  * by Alexander Egorenkov <egorenar@gmail.com>
    5  * and by Damien Bergamini <damien.bergamini@free.fr>
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice unmodified, this list of conditions, and the following
   13  *    disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  */
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD: releng/9.0/sys/dev/rt/if_rt.c 223929 2011-07-11 08:42:09Z ray $");
   33 
   34 #include "if_rtvar.h"
   35 #include "if_rtreg.h"
   36 
   37 #include <net/if.h>
   38 #include <net/if_arp.h>
   39 #include <net/ethernet.h>
   40 #include <net/if_dl.h>
   41 #include <net/if_media.h>
   42 #include <net/if_types.h>
   43 #include <net/if_vlan_var.h>
   44 
   45 #include <net/bpf.h>
   46 
   47 #include <machine/bus.h>
   48 #include <machine/cache.h>
   49 #include <machine/cpufunc.h>
   50 #include <machine/resource.h>
   51 #include <vm/vm_param.h>
   52 #include <vm/vm.h>
   53 #include <vm/pmap.h>
   54 #include <machine/pmap.h>
   55 #include <sys/bus.h>
   56 #include <sys/rman.h>
   57 
   58 #include <dev/mii/mii.h>
   59 #include <dev/mii/miivar.h>
   60 
   61 #include <mips/rt305x/rt305x_sysctlvar.h>
   62 #include <mips/rt305x/rt305xreg.h>
   63 
   64 #ifdef IF_RT_PHY_SUPPORT
   65 #include "miibus_if.h"
   66 #endif
   67 
   68 /*
   69  * Defines and macros
   70  */
   71 #define RT_MAX_AGG_SIZE                 3840
   72 
   73 #define RT_TX_DATA_SEG0_SIZE            MJUMPAGESIZE
   74 
   75 #define RT_MS(_v, _f)                   (((_v) & _f) >> _f##_S)
   76 #define RT_SM(_v, _f)                   (((_v) << _f##_S) & _f)
   77 
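RT_MS and RT_SM pair a register-field mask macro _f with a shift macro
_f##_S: RT_MS extracts a field from a register value, RT_SM encodes a
field value back into register position. A minimal sketch, assuming a
hypothetical FOO_BAR field (not defined in if_rtreg.h):

        #define FOO_BAR         0x00000ff0      /* field mask */
        #define FOO_BAR_S       4               /* field shift */

        uint32_t reg = 0x12345678;
        uint32_t fld = RT_MS(reg, FOO_BAR);  /* (reg & 0xff0) >> 4 == 0x67  */
        uint32_t enc = RT_SM(fld, FOO_BAR);  /* (0x67 << 4) & 0xff0 == 0x670 */
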
   78 #define RT_TX_WATCHDOG_TIMEOUT          5
   79 
   80 /*
   81  * Static function prototypes
   82  */
   83 static int      rt_probe(device_t dev);
   84 static int      rt_attach(device_t dev);
   85 static int      rt_detach(device_t dev);
   86 static int      rt_shutdown(device_t dev);
   87 static int      rt_suspend(device_t dev);
   88 static int      rt_resume(device_t dev);
   89 static void     rt_init_locked(void *priv);
   90 static void     rt_init(void *priv);
   91 static void     rt_stop_locked(void *priv);
   92 static void     rt_stop(void *priv);
   93 static void     rt_start(struct ifnet *ifp);
   94 static int      rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
   95 static void     rt_periodic(void *arg);
   96 static void     rt_tx_watchdog(void *arg);
   97 static void     rt_intr(void *arg);
   98 static void     rt_tx_coherent_intr(struct rt_softc *sc);
   99 static void     rt_rx_coherent_intr(struct rt_softc *sc);
  100 static void     rt_rx_delay_intr(struct rt_softc *sc);
  101 static void     rt_tx_delay_intr(struct rt_softc *sc);
  102 static void     rt_rx_intr(struct rt_softc *sc);
  103 static void     rt_tx_intr(struct rt_softc *sc, int qid);
  104 static void     rt_rx_done_task(void *context, int pending);
  105 static void     rt_tx_done_task(void *context, int pending);
  106 static void     rt_periodic_task(void *context, int pending);
  107 static int      rt_rx_eof(struct rt_softc *sc, int limit);
  108 static void     rt_tx_eof(struct rt_softc *sc,
  109                     struct rt_softc_tx_ring *ring);
  110 static void     rt_update_stats(struct rt_softc *sc);
  111 static void     rt_watchdog(struct rt_softc *sc);
  112 static void     rt_update_raw_counters(struct rt_softc *sc);
  113 static void     rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
  114 static void     rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
  115 static int      rt_txrx_enable(struct rt_softc *sc);
  116 static int      rt_alloc_rx_ring(struct rt_softc *sc,
  117                     struct rt_softc_rx_ring *ring);
  118 static void     rt_reset_rx_ring(struct rt_softc *sc,
  119                     struct rt_softc_rx_ring *ring);
  120 static void     rt_free_rx_ring(struct rt_softc *sc,
  121                     struct rt_softc_rx_ring *ring);
  122 static int      rt_alloc_tx_ring(struct rt_softc *sc,
  123                     struct rt_softc_tx_ring *ring, int qid);
  124 static void     rt_reset_tx_ring(struct rt_softc *sc,
  125                     struct rt_softc_tx_ring *ring);
  126 static void     rt_free_tx_ring(struct rt_softc *sc,
  127                     struct rt_softc_tx_ring *ring);
  128 static void     rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
  129                     int nseg, int error);
  130 static void     rt_sysctl_attach(struct rt_softc *sc);
  131 #ifdef IF_RT_PHY_SUPPORT
  132 void            rt_miibus_statchg(device_t);
  133 static int      rt_miibus_readreg(device_t, int, int);
  134 static int      rt_miibus_writereg(device_t, int, int, int);
  135 #endif
  136 static int      rt_ifmedia_upd(struct ifnet *);
  137 static void     rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);
  138 
  139 SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
  140 #ifdef IF_RT_DEBUG
  141 static int rt_debug = 0;
  142 SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RW, &rt_debug, 0,
  143     "RT debug level");
  144 TUNABLE_INT("hw.rt.debug", &rt_debug);
  145 #endif
  146 
  147 static int
  148 rt_probe(device_t dev)
  149 {
  150         device_set_desc(dev, "Ralink RT305XF onChip Ethernet MAC");
  151         return (0);
  152 }
  153 
  154 /*
   155  * macaddr_atoi - translate a string MAC address into a uint8_t array
  156  */
  157 static int
  158 macaddr_atoi(const char *str, uint8_t *mac)
  159 {
  160         int count, i;
  161         unsigned int amac[ETHER_ADDR_LEN];      /* Aligned version */
  162 
  163         count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
  164             &amac[0], &amac[1], &amac[2],
  165             &amac[3], &amac[4], &amac[5]);
  166         if (count < ETHER_ADDR_LEN) {
  167                 memset(mac, 0, ETHER_ADDR_LEN);
  168                 return (1);
  169         }
  170 
  171         /* Copy aligned to result */
  172         for (i = 0; i < ETHER_ADDR_LEN; i ++)
  173                 mac[i] = (amac[i] & 0xff);
  174 
  175         return (0);
  176 }
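
A usage sketch (illustration only, not driver code): macaddr_atoi()
accepts any single separator character between the six hex octets,
returns 0 on success, and zeroes the output array on failure:

        uint8_t mac[ETHER_ADDR_LEN];

        if (macaddr_atoi("00:18:e7:d5:83:90", mac) == 0) {
                /* mac[] now holds 00 18 e7 d5 83 90 */
        } else {
                /* parse failed; mac[] was cleared to zeroes */
        }
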
  177 
  178 #ifdef USE_GENERATED_MAC_ADDRESS
  179 static char *
  180 kernenv_next(char *cp)
  181 {
  182 
  183         if (cp != NULL) {
  184                 while (*cp != 0)
  185                         cp++;
  186                 cp++;
  187                 if (*cp == 0)
  188                         cp = NULL;
  189         }
  190         return (cp);
  191 }
  192 
   193 /*
   194  * generate_mac(uint8_t *mac)
   195  * MAC address generator for cases when the real device MAC address is
   196  * unknown or not yet accessible.
   197  * Uses the 'b','s','d' signature and 3 octets from a CRC32 of the kenv.
   198  * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
   199  *
   200  * The output is a MAC address that does not change between reboots, as
   201  * long as hints and bootloader info are unchanged.
   202  */
  203 static void
  204 generate_mac(uint8_t *mac)
  205 {
  206         unsigned char *cp;
  207         int i = 0;
  208         uint32_t crc = 0xffffffff;
  209 
  210         /* Generate CRC32 on kenv */
  211         if (dynamic_kenv) {
  212                 for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
  213                         crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
  214                 }
  215         } else {
  216                 for (cp = kern_envp; cp != NULL; cp = kernenv_next(cp)) {
  217                         crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
  218                 }
  219         }
  220         crc = ~crc;
  221 
  222         mac[0] = 'b';
  223         mac[1] = 's';
  224         mac[2] = 'd';
  225         mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
  226         mac[4] = (crc >> 8) & 0xff;
  227         mac[5] = crc & 0xff;
  228 }
  229 #endif
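
A worked example of the layout above (illustration only): if the final
CRC value is 0x89abcdef, generate_mac() produces

        mac[0..2] = 'b','s','d'                        -> 62:73:64
        mac[3]    = (crc >> 24) ^ ((crc >> 16) & 0xff) -> 0x89 ^ 0xab = 0x22
        mac[4]    = (crc >> 8) & 0xff                  -> 0xcd
        mac[5]    = crc & 0xff                         -> 0xef

i.e. 62:73:64:22:cd:ef. The leading 'b' (0x62) has the locally
administered bit set and the multicast bit clear, so the result is a
valid unicast address.
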
  230 
  231 /*
   232  * ether_request_mac - try to find a usable MAC address.
  233  */
  234 static int
  235 ether_request_mac(device_t dev, uint8_t *mac)
  236 {
  237         char *var;
  238 
  239         /*
  240          * "ethaddr" is passed via envp on RedBoot platforms
  241          * "kmac" is passed via argv on RouterBOOT platforms
  242          */
  243 #if defined(__U_BOOT__) ||  defined(__REDBOOT__) || defined(__ROUTERBOOT__)
  244         if ((var = getenv("ethaddr")) != NULL ||
  245             (var = getenv("kmac")) != NULL ) {
  246 
  247                 if(!macaddr_atoi(var, mac)) {
  248                         printf("%s: use %s macaddr from KENV\n",
  249                             device_get_nameunit(dev), var);
  250                         freeenv(var);
  251                         return (0);
  252                 }
  253                 freeenv(var);
  254         }
  255 #endif
  256 
  257         /*
  258          * Try from hints
  259          * hint.[dev].[unit].macaddr
  260          */
  261         if (!resource_string_value(device_get_name(dev),
  262             device_get_unit(dev), "macaddr", (const char **)&var)) {
  263 
  264                 if(!macaddr_atoi(var, mac)) {
  265                         printf("%s: use %s macaddr from hints\n",
  266                             device_get_nameunit(dev), var);
  267                         return (0);
  268                 }
  269         }
  270 
  271 #ifdef USE_GENERATED_MAC_ADDRESS
  272         generate_mac(mac);
  273 
  274         device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
  275             "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
  276 #else
  277         /* Hardcoded */
  278         mac[0] = 0x00;
  279         mac[1] = 0x18;
  280         mac[2] = 0xe7;
  281         mac[3] = 0xd5;
  282         mac[4] = 0x83;
  283         mac[5] = 0x90;
  284 
  285         device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
  286 #endif
  287 
  288         return (0);
  289 }
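
The hint lookup above corresponds to a /boot/device.hints entry of the
form (sketch, assuming the MAC attaches as rt0):

        hint.rt.0.macaddr="00:11:22:33:44:55"

Bootloader environments are consulted first via kenv ("ethaddr" on
RedBoot, "kmac" on RouterBOOT); the generated or hardcoded fallback is
used only when neither kenv nor hints yield a parsable address.
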
  290 
  291 static int
  292 rt_attach(device_t dev)
  293 {
  294         struct rt_softc *sc;
  295         struct ifnet *ifp;
  296         int error, i;
  297 
  298         sc = device_get_softc(dev);
  299         sc->dev = dev;
  300 
  301         mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
  302             MTX_DEF | MTX_RECURSE);
  303 
  304         sc->mem_rid = 0;
  305         sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
  306             RF_ACTIVE);
  307         if (sc->mem == NULL) {
  308                 device_printf(dev, "could not allocate memory resource\n");
  309                 error = ENXIO;
  310                 goto fail;
  311         }
  312 
  313         sc->bst = rman_get_bustag(sc->mem);
  314         sc->bsh = rman_get_bushandle(sc->mem);
  315 
  316         sc->irq_rid = 0;
  317         sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
  318             RF_ACTIVE);
  319         if (sc->irq == NULL) {
  320                 device_printf(dev,
  321                     "could not allocate interrupt resource\n");
  322                 error = ENXIO;
  323                 goto fail;
  324         }
  325 
  326 #ifdef IF_RT_DEBUG
  327         sc->debug = rt_debug;
  328 
  329         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
  330                 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
  331                 "debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
  332 #endif
  333 
  334         device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
  335             sc->mac_rev);
  336 
  337         /* Reset hardware */
  338         RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
  339 
  340         RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
  341             (
  342             GDM_ICS_EN | /* Enable IP Csum */
  343             GDM_TCS_EN | /* Enable TCP Csum */
  344             GDM_UCS_EN | /* Enable UDP Csum */
  345             GDM_STRPCRC | /* Strip CRC from packet */
  346             GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
  347             GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
  348             GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
  349             GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
  350             ));
  351 
  352         /* allocate Tx and Rx rings */
  353         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
  354                 error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
  355                 if (error != 0) {
  356                         device_printf(dev, "could not allocate Tx ring #%d\n",
  357                             i);
  358                         goto fail;
  359                 }
  360         }
  361 
  362         sc->tx_ring_mgtqid = 5;
  363 
  364         error = rt_alloc_rx_ring(sc, &sc->rx_ring);
  365         if (error != 0) {
  366                 device_printf(dev, "could not allocate Rx ring\n");
  367                 goto fail;
  368         }
  369 
  370         callout_init(&sc->periodic_ch, 0);
  371         callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);
  372 
  373         ifp = sc->ifp = if_alloc(IFT_ETHER);
  374         if (ifp == NULL) {
  375                 device_printf(dev, "could not if_alloc()\n");
  376                 error = ENOMEM;
  377                 goto fail;
  378         }
  379 
  380         ifp->if_softc = sc;
  381         if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
  382         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  383         ifp->if_init = rt_init;
  384         ifp->if_ioctl = rt_ioctl;
  385         ifp->if_start = rt_start;
  386         ifp->if_mtu = ETHERMTU;
  387 #define RT_TX_QLEN      256
  388 
  389         IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
  390         ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
  391         IFQ_SET_READY(&ifp->if_snd);
  392 
  393 #ifdef IF_RT_PHY_SUPPORT
  394         error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
  395             rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
  396         if (error != 0) {
  397                 device_printf(dev, "attaching PHYs failed\n");
  398                 error = ENXIO;
  399                 goto fail;
  400         }
  401 #else
  402         ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
  403         ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
  404             NULL);
  405         ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
  406 
  407 #endif /* IF_RT_PHY_SUPPORT */
  408 
  409         ether_request_mac(dev, sc->mac_addr);
  410         ether_ifattach(ifp, sc->mac_addr);
  411 
  412         /*
  413          * Tell the upper layer(s) we support long frames.
  414          */
  415         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  416         ifp->if_capabilities |= IFCAP_VLAN_MTU;
  417         ifp->if_capenable |= IFCAP_VLAN_MTU;
  418         ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
  419         ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
  420 
  421         /* init task queue */
  422         TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
  423         TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
  424         TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);
  425 
  426         sc->rx_process_limit = 100;
  427 
  428         sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
  429             taskqueue_thread_enqueue, &sc->taskqueue);
  430 
  431         taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
  432             device_get_nameunit(sc->dev));
  433 
  434         rt_sysctl_attach(sc);
  435 
  436         /* set up interrupt */
  437         error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
  438             NULL, rt_intr, sc, &sc->irqh);
  439         if (error != 0) {
  440                 printf("%s: could not set up interrupt\n",
  441                         device_get_nameunit(dev));
  442                 goto fail;
  443         }
  444 #ifdef IF_RT_DEBUG
  445         device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
  446 #endif
  447 
  448         return (0);
  449 
  450 fail:
  451         /* free Tx and Rx rings */
  452         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
  453                 rt_free_tx_ring(sc, &sc->tx_ring[i]);
  454 
  455         rt_free_rx_ring(sc, &sc->rx_ring);
  456 
  457         mtx_destroy(&sc->lock);
  458 
  459         if (sc->mem != NULL)
  460                 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
  461                     sc->mem);
  462 
  463         if (sc->irq != NULL)
  464                 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
  465                     sc->irq);
  466 
  467         return (error);
  468 }
  469 
  470 /*
  471  * Set media options.
  472  */
  473 static int
  474 rt_ifmedia_upd(struct ifnet *ifp)
  475 {
  476         struct rt_softc *sc;
  477 #ifdef IF_RT_PHY_SUPPORT
  478         struct mii_data *mii;
  479         int error = 0;
  480 
  481         sc = ifp->if_softc;
  482         RT_SOFTC_LOCK(sc);
  483 
  484         mii = device_get_softc(sc->rt_miibus);
  485         if (mii->mii_instance) {
  486                 struct mii_softc *miisc;
  487                 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
  488                                 miisc = LIST_NEXT(miisc, mii_list))
  489                         mii_phy_reset(miisc);
  490         }
  491         if (mii)
  492                 error = mii_mediachg(mii);
  493         RT_SOFTC_UNLOCK(sc);
  494 
  495         return (error);
  496 
  497 #else /* !IF_RT_PHY_SUPPORT */
  498 
  499         struct ifmedia *ifm;
  500         struct ifmedia_entry *ife;
  501 
  502         sc = ifp->if_softc;
  503         ifm = &sc->rt_ifmedia;
  504         ife = ifm->ifm_cur;
  505 
  506         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
  507                 return (EINVAL);
  508 
  509         if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
  510                 device_printf(sc->dev,
  511                     "AUTO is not supported for multiphy MAC");
  512                 return (EINVAL);
  513         }
  514 
  515         /*
  516          * Ignore everything
  517          */
  518         return (0);
  519 #endif /* IF_RT_PHY_SUPPORT */
  520 }
  521 
   522 /*
   523  * Report current media status (effectively hardcoded to 100baseTX FDX).
   524  */
  525 static void
  526 rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
  527 {
  528 #ifdef IF_RT_PHY_SUPPORT
  529         struct rt_softc *sc;
  530         struct mii_data *mii;
  531 
  532         sc = ifp->if_softc;
  533 
  534         RT_SOFTC_LOCK(sc);
  535         mii = device_get_softc(sc->rt_miibus);
  536         mii_pollstat(mii);
  537         ifmr->ifm_active = mii->mii_media_active;
  538         ifmr->ifm_status = mii->mii_media_status;
  539         ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
  540         ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
  541         RT_SOFTC_UNLOCK(sc);
  542 #else /* !IF_RT_PHY_SUPPORT */
  543 
  544         ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
  545         ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
  546 #endif /* IF_RT_PHY_SUPPORT */
  547 }
  548 
  549 static int
  550 rt_detach(device_t dev)
  551 {
  552         struct rt_softc *sc;
  553         struct ifnet *ifp;
  554         int i;
  555 
  556         sc = device_get_softc(dev);
  557         ifp = sc->ifp;
  558 
  559         RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");
  560 
  561         RT_SOFTC_LOCK(sc);
  562 
  563         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  564 
  565         callout_stop(&sc->periodic_ch);
  566         callout_stop(&sc->tx_watchdog_ch);
  567 
  568         taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
  569         taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
  570         taskqueue_drain(sc->taskqueue, &sc->periodic_task);
  571 
  572         /* free Tx and Rx rings */
  573         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
  574                 rt_free_tx_ring(sc, &sc->tx_ring[i]);
  575 
  576         rt_free_rx_ring(sc, &sc->rx_ring);
  577 
  578         RT_SOFTC_UNLOCK(sc);
  579 
  580 #ifdef IF_RT_PHY_SUPPORT
  581         if (sc->rt_miibus != NULL)
  582                 device_delete_child(dev, sc->rt_miibus);
  583 #endif
  584 
  585         ether_ifdetach(ifp);
  586         if_free(ifp);
  587 
  588         taskqueue_free(sc->taskqueue);
  589 
  590         mtx_destroy(&sc->lock);
  591 
  592         bus_generic_detach(dev);
  593         bus_teardown_intr(dev, sc->irq, sc->irqh);
  594         bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
  595         bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
  596 
  597         return (0);
  598 }
  599 
  600 static int
  601 rt_shutdown(device_t dev)
  602 {
  603         struct rt_softc *sc;
  604 
  605         sc = device_get_softc(dev);
  606         RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
  607         rt_stop(sc);
  608 
  609         return (0);
  610 }
  611 
  612 static int
  613 rt_suspend(device_t dev)
  614 {
  615         struct rt_softc *sc;
  616 
  617         sc = device_get_softc(dev);
  618         RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
  619         rt_stop(sc);
  620 
  621         return (0);
  622 }
  623 
  624 static int
  625 rt_resume(device_t dev)
  626 {
  627         struct rt_softc *sc;
  628         struct ifnet *ifp;
  629 
  630         sc = device_get_softc(dev);
  631         ifp = sc->ifp;
  632 
  633         RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");
  634 
  635         if (ifp->if_flags & IFF_UP)
  636                 rt_init(sc);
  637 
  638         return (0);
  639 }
  640 
  641 /*
   642  * rt_init_locked - run the initialization process with the mutex held.
  643  */
  644 static void
  645 rt_init_locked(void *priv)
  646 {
  647         struct rt_softc *sc;
  648         struct ifnet *ifp;
  649 #ifdef IF_RT_PHY_SUPPORT
  650         struct mii_data *mii;
  651 #endif
  652         int i, ntries;
  653         uint32_t tmp;
  654 
  655         sc = priv;
  656         ifp = sc->ifp;
  657 #ifdef IF_RT_PHY_SUPPORT
  658         mii = device_get_softc(sc->rt_miibus);
  659 #endif
  660 
  661         RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");
  662 
  663         RT_SOFTC_ASSERT_LOCKED(sc);
  664 
  665         /* hardware reset */
  666         RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
  667         rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);
  668 
  669         /* Fwd to CPU (uni|broad|multi)cast and Unknown */
  670         RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
  671             (
  672             GDM_ICS_EN | /* Enable IP Csum */
  673             GDM_TCS_EN | /* Enable TCP Csum */
  674             GDM_UCS_EN | /* Enable UDP Csum */
  675             GDM_STRPCRC | /* Strip CRC from packet */
  676             GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
  677             GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
  678             GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
  679             GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
  680             ));
  681 
  682         /* disable DMA engine */
  683         RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, 0);
  684         RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, 0xffffffff);
  685 
  686         /* wait while DMA engine is busy */
  687         for (ntries = 0; ntries < 100; ntries++) {
  688                 tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
  689                 if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
  690                         break;
  691                 DELAY(1000);
  692         }
  693 
  694         if (ntries == 100) {
  695                 device_printf(sc->dev, "timeout waiting for DMA engine\n");
  696                 goto fail;
  697         }
  698 
  699         /* reset Rx and Tx rings */
  700         tmp = FE_RST_DRX_IDX0 |
  701                 FE_RST_DTX_IDX3 |
  702                 FE_RST_DTX_IDX2 |
  703                 FE_RST_DTX_IDX1 |
  704                 FE_RST_DTX_IDX0;
  705 
  706         RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, tmp);
  707 
  708         /* XXX switch set mac address */
  709         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
  710                 rt_reset_tx_ring(sc, &sc->tx_ring[i]);
  711 
  712         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
  713                 /* update TX_BASE_PTRx */
  714                 RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
  715                         sc->tx_ring[i].desc_phys_addr);
  716                 RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
  717                         RT_SOFTC_TX_RING_DESC_COUNT);
  718                 RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
  719         }
  720 
  721         /* init Rx ring */
  722         rt_reset_rx_ring(sc, &sc->rx_ring);
  723 
  724         /* update RX_BASE_PTR0 */
  725         RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
  726                 sc->rx_ring.desc_phys_addr);
  727         RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
  728                 RT_SOFTC_RX_RING_DATA_COUNT);
  729         RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
  730                 RT_SOFTC_RX_RING_DATA_COUNT - 1);
  731 
   732         /* write back DDONE, 16-byte burst, enable RX/TX DMA */
  733         RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG,
  734             FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN);
  735 
   736         /* disable interrupt mitigation */
  737         RT_WRITE(sc, PDMA_BASE + DELAY_INT_CFG, 0);
  738 
  739         /* clear pending interrupts */
  740         RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, 0xffffffff);
  741 
  742         /* enable interrupts */
  743         tmp =   CNT_PPE_AF |
  744                 CNT_GDM_AF |
  745                 PSE_P2_FC |
  746                 GDM_CRC_DROP |
  747                 PSE_BUF_DROP |
  748                 GDM_OTHER_DROP |
  749                 PSE_P1_FC |
  750                 PSE_P0_FC |
  751                 PSE_FQ_EMPTY |
  752                 INT_TX_COHERENT |
  753                 INT_RX_COHERENT |
  754                 INT_TXQ3_DONE |
  755                 INT_TXQ2_DONE |
  756                 INT_TXQ1_DONE |
  757                 INT_TXQ0_DONE |
  758                 INT_RX_DONE;
  759 
  760         sc->intr_enable_mask = tmp;
  761 
  762         RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
  763 
  764         if (rt_txrx_enable(sc) != 0)
  765                 goto fail;
  766 
  767 #ifdef IF_RT_PHY_SUPPORT
  768         if (mii) mii_mediachg(mii);
  769 #endif /* IF_RT_PHY_SUPPORT */
  770 
  771         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  772         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  773 
  774         sc->periodic_round = 0;
  775 
  776         callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
  777 
  778         return;
  779 
  780 fail:
  781         rt_stop_locked(sc);
  782 }
  783 
  784 /*
  785  * rt_init - lock and initialize device.
  786  */
  787 static void
  788 rt_init(void *priv)
  789 {
  790         struct rt_softc *sc;
  791 
  792         sc = priv;
  793         RT_SOFTC_LOCK(sc);
  794         rt_init_locked(sc);
  795         RT_SOFTC_UNLOCK(sc);
  796 }
  797 
  798 /*
  799  * rt_stop_locked - stop TX/RX w/ lock
  800  */
  801 static void
  802 rt_stop_locked(void *priv)
  803 {
  804         struct rt_softc *sc;
  805         struct ifnet *ifp;
  806 
  807         sc = priv;
  808         ifp = sc->ifp;
  809 
  810         RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");
  811 
  812         RT_SOFTC_ASSERT_LOCKED(sc);
  813         sc->tx_timer = 0;
  814         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  815         callout_stop(&sc->periodic_ch);
  816         callout_stop(&sc->tx_watchdog_ch);
  817         RT_SOFTC_UNLOCK(sc);
  818         taskqueue_block(sc->taskqueue);
  819 
   820         /*
   821          * XXX: rt_stop_locked is sometimes called from interrupt
   822          * context and panics; the drains below stay disabled until fixed.
   823          */
  824 #ifdef notyet
  825         taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
  826         taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
  827         taskqueue_drain(sc->taskqueue, &sc->periodic_task);
  828 #endif
  829         RT_SOFTC_LOCK(sc);
  830 
  831         /* disable interrupts */
  832         RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, 0);
  833 
  834         /* reset adapter */
  835         RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
  836 
  837         RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
  838             (
  839             GDM_ICS_EN | /* Enable IP Csum */
  840             GDM_TCS_EN | /* Enable TCP Csum */
  841             GDM_UCS_EN | /* Enable UDP Csum */
  842             GDM_STRPCRC | /* Strip CRC from packet */
  843             GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
  844             GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
  845             GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
  846             GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
  847             ));
  848 }
  849 
  850 static void
  851 rt_stop(void *priv)
  852 {
  853         struct rt_softc *sc;
  854 
  855         sc = priv;
  856         RT_SOFTC_LOCK(sc);
  857         rt_stop_locked(sc);
  858         RT_SOFTC_UNLOCK(sc);
  859 }
  860 
  861 /*
  862  * rt_tx_data - transmit packet.
  863  */
  864 static int
  865 rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
  866 {
  867         struct ifnet *ifp;
  868         struct rt_softc_tx_ring *ring;
  869         struct rt_softc_tx_data *data;
  870         struct rt_txdesc *desc;
  871         struct mbuf *m_d;
  872         bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
  873         int error, ndmasegs, ndescs, i;
  874 
  875         KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
  876                 ("%s: Tx data: invalid qid=%d\n",
  877                  device_get_nameunit(sc->dev), qid));
  878 
  879         RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);
  880 
  881         ifp = sc->ifp;
  882         ring = &sc->tx_ring[qid];
  883         desc = &ring->desc[ring->desc_cur];
  884         data = &ring->data[ring->data_cur];
  885 
  886         error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
  887             dma_seg, &ndmasegs, 0);
  888         if (error != 0) {
  889                 /* too many fragments, linearize */
  890 
  891                 RT_DPRINTF(sc, RT_DEBUG_TX,
  892                         "could not load mbuf DMA map, trying to linearize "
  893                         "mbuf: ndmasegs=%d, len=%d, error=%d\n",
  894                         ndmasegs, m->m_pkthdr.len, error);
  895 
  896                 m_d = m_collapse(m, M_DONTWAIT, 16);
  897                 if (m_d == NULL) {
  898                         m_freem(m);
  899                         m = NULL;
  900                         return (ENOMEM);
  901                 }
  902                 m = m_d;
  903 
  904                 sc->tx_defrag_packets++;
  905 
  906                 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
  907                     data->dma_map, m, dma_seg, &ndmasegs, 0);
  908                 if (error != 0) {
  909                         device_printf(sc->dev, "could not load mbuf DMA map: "
  910                             "ndmasegs=%d, len=%d, error=%d\n",
  911                             ndmasegs, m->m_pkthdr.len, error);
  912                         m_freem(m);
  913                         return (error);
  914                 }
  915         }
  916 
  917         if (m->m_pkthdr.len == 0)
  918                 ndmasegs = 0;
  919 
  920         /* determine how many Tx descs are required */
  921         ndescs = 1 + ndmasegs / 2;
  922         if ((ring->desc_queued + ndescs) >
  923             (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
  924                 RT_DPRINTF(sc, RT_DEBUG_TX,
  925                     "there are not enough Tx descs\n");
  926 
  927                 sc->no_tx_desc_avail++;
  928 
  929                 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
  930                 m_freem(m);
  931                 return (EFBIG);
  932         }
  933 
  934         data->m = m;
  935 
  936         /* set up Tx descs */
  937         for (i = 0; i < ndmasegs; i += 2) {
   938                 /* Set destination */
  939                 desc->dst = (TXDSCR_DST_PORT_GDMA1);
  940                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
  941                         desc->dst |= (TXDSCR_IP_CSUM_GEN|TXDSCR_UDP_CSUM_GEN|
  942                             TXDSCR_TCP_CSUM_GEN);
  943                 /* Set queue id */
  944                 desc->qn = qid;
  945                 /* No PPPoE */
  946                 desc->pppoe = 0;
  947                 /* No VLAN */
  948                 desc->vid = 0;
  949 
  950                 desc->sdp0 = htole32(dma_seg[i].ds_addr);
  951                 desc->sdl0 = htole16(dma_seg[i].ds_len |
  952                     ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));
  953 
  954                 if ((i+1) < ndmasegs) {
  955                         desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
  956                         desc->sdl1 = htole16(dma_seg[i+1].ds_len |
  957                             ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
  958                 } else {
  959                         desc->sdp1 = 0;
  960                         desc->sdl1 = 0;
  961                 }
  962 
  963                 if ((i+2) < ndmasegs) {
  964                         ring->desc_queued++;
  965                         ring->desc_cur = (ring->desc_cur + 1) %
  966                             RT_SOFTC_TX_RING_DESC_COUNT;
  967                 }
  968                 desc = &ring->desc[ring->desc_cur];
  969         }
  970 
  971         RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
  972             "DMA ds_len=%d/%d/%d/%d/%d\n",
  973             m->m_pkthdr.len, ndmasegs,
  974             (int) dma_seg[0].ds_len,
  975             (int) dma_seg[1].ds_len,
  976             (int) dma_seg[2].ds_len,
  977             (int) dma_seg[3].ds_len,
  978             (int) dma_seg[4].ds_len);
  979 
  980         bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
  981                 BUS_DMASYNC_PREWRITE);
  982         bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
  983                 BUS_DMASYNC_PREWRITE);
  984         bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
  985                 BUS_DMASYNC_PREWRITE);
  986 
  987         ring->desc_queued++;
  988         ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;
  989 
  990         ring->data_queued++;
  991         ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;
  992 
  993         /* kick Tx */
  994         RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(qid), ring->desc_cur);
  995 
  996         return (0);
  997 }
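
Descriptor accounting in rt_tx_data, sketched: each rt_txdesc carries up
to two scatter/gather segments (sdp0/sdl0 and sdp1/sdl1), so the
reservation "ndescs = 1 + ndmasegs / 2" is a safe upper bound:

        ndmasegs = 1  ->  reserves 1 descriptor, consumes 1
        ndmasegs = 4  ->  reserves 3 descriptors, consumes 2
        ndmasegs = 5  ->  reserves 3 descriptors, consumes 3

Checking against RT_SOFTC_TX_RING_DESC_COUNT - 2 keeps a gap so the
software write index never fully wraps onto the hardware read index.
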
  998 
  999 /*
  1000  * rt_start - if_start handler: dequeue and transmit pending packets
 1001  */
 1002 static void
 1003 rt_start(struct ifnet *ifp)
 1004 {
 1005         struct rt_softc *sc;
 1006         struct mbuf *m;
 1007         int qid = 0 /* XXX must check QoS priority */;
 1008 
 1009         sc = ifp->if_softc;
 1010 
 1011         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1012                 return;
 1013 
 1014         for (;;) {
 1015                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1016                 if (m == NULL)
 1017                         break;
 1018 
 1019                 m->m_pkthdr.rcvif = NULL;
 1020 
 1021                 RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);
 1022 
 1023                 if (sc->tx_ring[qid].data_queued >=
 1024                     RT_SOFTC_TX_RING_DATA_COUNT) {
 1025                         RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
 1026 
 1027                         RT_DPRINTF(sc, RT_DEBUG_TX,
 1028                             "if_start: Tx ring with qid=%d is full\n", qid);
 1029 
 1030                         m_freem(m);
 1031 
 1032                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1033                         ifp->if_oerrors++;
 1034 
 1035                         sc->tx_data_queue_full[qid]++;
 1036 
 1037                         break;
 1038                 }
 1039 
 1040                 if (rt_tx_data(sc, m, qid) != 0) {
 1041                         RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
 1042 
 1043                         ifp->if_oerrors++;
 1044 
 1045                         break;
 1046                 }
 1047 
 1048                 RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
 1049                 sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
 1050                 callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
 1051         }
 1052 }
 1053 
 1054 /*
  1055  * rt_update_promisc - set/clear promiscuous mode. Not used yet, because
  1056  * filtering is done by the attached Ethernet switch.
 1057  */
 1058 static void
 1059 rt_update_promisc(struct ifnet *ifp)
 1060 {
 1061         struct rt_softc *sc;
 1062 
 1063         sc = ifp->if_softc;
 1064         printf("%s: %s promiscuous mode\n",
 1065                 device_get_nameunit(sc->dev),
 1066                 (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
 1067 }
 1068 
 1069 /*
 1070  * rt_ioctl - ioctl handler.
 1071  */
 1072 static int
 1073 rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1074 {
 1075         struct rt_softc *sc;
 1076         struct ifreq *ifr;
 1077 #ifdef IF_RT_PHY_SUPPORT
 1078         struct mii_data *mii;
 1079 #endif /* IF_RT_PHY_SUPPORT */
 1080         int error, startall;
 1081 
 1082         sc = ifp->if_softc;
 1083         ifr = (struct ifreq *) data;
 1084 
 1085         error = 0;
 1086 
 1087         switch (cmd) {
 1088         case SIOCSIFFLAGS:
 1089                 startall = 0;
 1090                 RT_SOFTC_LOCK(sc);
 1091                 if (ifp->if_flags & IFF_UP) {
 1092                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 1093                                 if ((ifp->if_flags ^ sc->if_flags) &
 1094                                     IFF_PROMISC)
 1095                                         rt_update_promisc(ifp);
 1096                         } else {
 1097                                 rt_init_locked(sc);
 1098                                 startall = 1;
 1099                         }
 1100                 } else {
 1101                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 1102                                 rt_stop_locked(sc);
 1103                 }
 1104                 sc->if_flags = ifp->if_flags;
 1105                 RT_SOFTC_UNLOCK(sc);
 1106                 break;
 1107         case SIOCGIFMEDIA:
 1108         case SIOCSIFMEDIA:
 1109 #ifdef IF_RT_PHY_SUPPORT
 1110                 mii = device_get_softc(sc->rt_miibus);
 1111                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
 1112 #else
 1113                 error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
 1114 #endif /* IF_RT_PHY_SUPPORT */
 1115                 break;
 1116         default:
 1117                 error = ether_ioctl(ifp, cmd, data);
 1118                 break;
 1119         }
 1120         return (error);
 1121 }
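
An interface-level sketch of the SIOCSIFFLAGS path above (assuming the
device attached as rt0):

        # ifconfig rt0 up        -> SIOCSIFFLAGS -> rt_init_locked()
        # ifconfig rt0 promisc   -> SIOCSIFFLAGS -> rt_update_promisc() (log only)
        # ifconfig rt0 down      -> SIOCSIFFLAGS -> rt_stop_locked()
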
 1122 
 1123 /*
 1124  * rt_periodic - Handler of PERIODIC interrupt
 1125  */
 1126 static void
 1127 rt_periodic(void *arg)
 1128 {
 1129         struct rt_softc *sc;
 1130 
 1131         sc = arg;
 1132         RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
 1133         taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
 1134 }
 1135 
 1136 /*
 1137  * rt_tx_watchdog - Handler of TX Watchdog
 1138  */
 1139 static void
 1140 rt_tx_watchdog(void *arg)
 1141 {
 1142         struct rt_softc *sc;
 1143         struct ifnet *ifp;
 1144 
 1145         sc = arg;
 1146         ifp = sc->ifp;
 1147 
 1148         if (sc->tx_timer == 0)
 1149                 return;
 1150 
 1151         if (--sc->tx_timer == 0) {
 1152                 device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
 1153 #ifdef notyet
 1154                 /*
  1155                  * XXX: Commented out because the reset breaks input.
 1156                  */
 1157                 rt_stop_locked(sc);
 1158                 rt_init_locked(sc);
 1159 #endif
 1160                 ifp->if_oerrors++;
 1161                 sc->tx_watchdog_timeouts++;
 1162         }
 1163         callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
 1164 }
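
Watchdog timing, sketched: rt_start() sets tx_timer to
RT_TX_WATCHDOG_TIMEOUT (5) on every enqueue and arms tx_watchdog_ch at
one-second intervals, while rt_tx_done_task() zeroes tx_timer on
completion, so the timeout above fires only after about five seconds of
queued but unacknowledged transmissions.
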
 1165 
 1166 /*
 1167  * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
 1168  */
 1169 static void
 1170 rt_cnt_ppe_af(struct rt_softc *sc)
 1171 {
 1172 
 1173         RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
 1174 }
 1175 
 1176 /*
 1177  * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
 1178  */
 1179 static void
 1180 rt_cnt_gdm_af(struct rt_softc *sc)
 1181 {
 1182 
 1183         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1184             "GDMA 1 & 2 Counter Table Almost Full\n");
 1185 }
 1186 
 1187 /*
 1188  * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
 1189  */
 1190 static void
 1191 rt_pse_p2_fc(struct rt_softc *sc)
 1192 {
 1193 
 1194         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1195             "PSE port2 (GDMA 2) flow control asserted.\n");
 1196 }
 1197 
 1198 /*
 1199  * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
 1200  * interrupt
 1201  */
 1202 static void
 1203 rt_gdm_crc_drop(struct rt_softc *sc)
 1204 {
 1205 
 1206         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1207             "GDMA 1 & 2 discard a packet due to CRC error\n");
 1208 }
 1209 
 1210 /*
 1211  * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
 1212  */
 1213 static void
 1214 rt_pse_buf_drop(struct rt_softc *sc)
 1215 {
 1216 
 1217         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1218             "PSE discards a packet due to buffer sharing limitation\n");
 1219 }
 1220 
 1221 /*
 1222  * rt_gdm_other_drop - Handler of discard on other reason interrupt
 1223  */
 1224 static void
 1225 rt_gdm_other_drop(struct rt_softc *sc)
 1226 {
 1227 
 1228         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1229             "GDMA 1 & 2 discard a packet due to other reason\n");
 1230 }
 1231 
 1232 /*
 1233  * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
 1234  */
 1235 static void
 1236 rt_pse_p1_fc(struct rt_softc *sc)
 1237 {
 1238 
 1239         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1240             "PSE port1 (GDMA 1) flow control asserted.\n");
 1241 }
 1242 
 1243 /*
 1244  * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
 1245  */
 1246 static void
 1247 rt_pse_p0_fc(struct rt_softc *sc)
 1248 {
 1249 
 1250         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1251             "PSE port0 (CDMA) flow control asserted.\n");
 1252 }
 1253 
 1254 /*
 1255  * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
 1256  */
 1257 static void
 1258 rt_pse_fq_empty(struct rt_softc *sc)
 1259 {
 1260 
 1261         RT_DPRINTF(sc, RT_DEBUG_INTR,
 1262             "PSE free Q empty threshold reached & forced drop "
 1263                     "condition occurred.\n");
 1264 }
 1265 
 1266 /*
 1267  * rt_intr - main ISR
 1268  */
 1269 static void
 1270 rt_intr(void *arg)
 1271 {
 1272         struct rt_softc *sc;
 1273         struct ifnet *ifp;
 1274         uint32_t status;
 1275 
 1276         sc = arg;
 1277         ifp = sc->ifp;
 1278 
 1279         /* acknowledge interrupts */
 1280         status = RT_READ(sc, GE_PORT_BASE + FE_INT_STATUS);
 1281         RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, status);
 1282 
 1283         RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
 1284 
 1285         if (status == 0xffffffff ||     /* device likely went away */
 1286                 status == 0)            /* not for us */
 1287                 return;
 1288 
 1289         sc->interrupts++;
 1290 
 1291         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1292                 return;
 1293 
 1294         if (status & CNT_PPE_AF)
 1295                 rt_cnt_ppe_af(sc);
 1296 
 1297         if (status & CNT_GDM_AF)
 1298                 rt_cnt_gdm_af(sc);
 1299 
 1300         if (status & PSE_P2_FC)
 1301                 rt_pse_p2_fc(sc);
 1302 
 1303         if (status & GDM_CRC_DROP)
 1304                 rt_gdm_crc_drop(sc);
 1305 
 1306         if (status & PSE_BUF_DROP)
 1307                 rt_pse_buf_drop(sc);
 1308 
 1309         if (status & GDM_OTHER_DROP)
 1310                 rt_gdm_other_drop(sc);
 1311 
 1312         if (status & PSE_P1_FC)
 1313                 rt_pse_p1_fc(sc);
 1314 
 1315         if (status & PSE_P0_FC)
 1316                 rt_pse_p0_fc(sc);
 1317 
 1318         if (status & PSE_FQ_EMPTY)
 1319                 rt_pse_fq_empty(sc);
 1320 
 1321         if (status & INT_TX_COHERENT)
 1322                 rt_tx_coherent_intr(sc);
 1323 
 1324         if (status & INT_RX_COHERENT)
 1325                 rt_rx_coherent_intr(sc);
 1326 
 1327         if (status & RX_DLY_INT)
 1328                 rt_rx_delay_intr(sc);
 1329 
 1330         if (status & TX_DLY_INT)
 1331                 rt_tx_delay_intr(sc);
 1332 
 1333         if (status & INT_RX_DONE)
 1334                 rt_rx_intr(sc);
 1335 
 1336         if (status & INT_TXQ3_DONE)
 1337                 rt_tx_intr(sc, 3);
 1338 
 1339         if (status & INT_TXQ2_DONE)
 1340                 rt_tx_intr(sc, 2);
 1341 
 1342         if (status & INT_TXQ1_DONE)
 1343                 rt_tx_intr(sc, 1);
 1344 
 1345         if (status & INT_TXQ0_DONE)
 1346                 rt_tx_intr(sc, 0);
 1347 }
 1348 
 1349 static void
 1350 rt_tx_coherent_intr(struct rt_softc *sc)
 1351 {
 1352         uint32_t tmp;
 1353         int i;
 1354 
 1355         RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");
 1356 
 1357         sc->tx_coherent_interrupts++;
 1358 
 1359         /* restart DMA engine */
 1360         tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
 1361         tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
 1362         RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
 1363 
 1364         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
 1365                 rt_reset_tx_ring(sc, &sc->tx_ring[i]);
 1366 
 1367         for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
 1368                 RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
 1369                         sc->tx_ring[i].desc_phys_addr);
 1370                 RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
 1371                         RT_SOFTC_TX_RING_DESC_COUNT);
 1372                 RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
 1373         }
 1374 
 1375         rt_txrx_enable(sc);
 1376 }
 1377 
 1378 /*
 1379  * rt_rx_coherent_intr
 1380  */
 1381 static void
 1382 rt_rx_coherent_intr(struct rt_softc *sc)
 1383 {
 1384         uint32_t tmp;
 1385 
 1386         RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");
 1387 
 1388         sc->rx_coherent_interrupts++;
 1389 
 1390         /* restart DMA engine */
 1391         tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
 1392         tmp &= ~(FE_RX_DMA_EN);
 1393         RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
 1394 
 1395         /* init Rx ring */
 1396         rt_reset_rx_ring(sc, &sc->rx_ring);
 1397         RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
 1398                 sc->rx_ring.desc_phys_addr);
 1399         RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
 1400                 RT_SOFTC_RX_RING_DATA_COUNT);
 1401         RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
 1402                 RT_SOFTC_RX_RING_DATA_COUNT - 1);
 1403 
 1404         rt_txrx_enable(sc);
 1405 }
 1406 
 1407 /*
 1408  * rt_rx_intr - a packet received
 1409  */
 1410 static void
 1411 rt_rx_intr(struct rt_softc *sc)
 1412 {
 1413 
 1414         RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
 1415         sc->rx_interrupts++;
 1416         RT_SOFTC_LOCK(sc);
 1417 
 1418         if (!(sc->intr_disable_mask & INT_RX_DONE)) {
 1419                 rt_intr_disable(sc, INT_RX_DONE);
 1420                 taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
 1421         }
 1422 
 1423         sc->intr_pending_mask |= INT_RX_DONE;
 1424         RT_SOFTC_UNLOCK(sc);
 1425 }
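
rt_rx_intr and rt_tx_intr share one deferral pattern, sketched here: the
ISR masks the source via rt_intr_disable(), records it in
intr_pending_mask, and enqueues a task; the task clears the pending bit,
processes a bounded batch (rx_process_limit for Rx), and either
reschedules itself or re-enables the source. This keeps interrupt-context
work minimal and lets a single task drain bursts without an interrupt
storm.
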
 1426 
 1427 static void
 1428 rt_rx_delay_intr(struct rt_softc *sc)
 1429 {
 1430 
 1431         RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
 1432         sc->rx_delay_interrupts++;
 1433 }
 1434 
 1435 static void
 1436 rt_tx_delay_intr(struct rt_softc *sc)
 1437 {
 1438 
 1439         RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
 1440         sc->tx_delay_interrupts++;
 1441 }
 1442 
 1443 /*
  1444  * rt_tx_intr - transmission of a packet is done
 1445  */
 1446 static void
 1447 rt_tx_intr(struct rt_softc *sc, int qid)
 1448 {
 1449 
 1450         KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
 1451                 ("%s: Tx interrupt: invalid qid=%d\n",
 1452                  device_get_nameunit(sc->dev), qid));
 1453 
 1454         RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);
 1455 
 1456         sc->tx_interrupts[qid]++;
 1457         RT_SOFTC_LOCK(sc);
 1458 
 1459         if (!(sc->intr_disable_mask & (INT_TXQ0_DONE << qid))) {
 1460                 rt_intr_disable(sc, (INT_TXQ0_DONE << qid));
 1461                 taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
 1462         }
 1463 
 1464         sc->intr_pending_mask |= (INT_TXQ0_DONE << qid);
 1465         RT_SOFTC_UNLOCK(sc);
 1466 }
 1467 
 1468 /*
 1469  * rt_rx_done_task - run RX task
 1470  */
 1471 static void
 1472 rt_rx_done_task(void *context, int pending)
 1473 {
 1474         struct rt_softc *sc;
 1475         struct ifnet *ifp;
 1476         int again;
 1477 
 1478         sc = context;
 1479         ifp = sc->ifp;
 1480 
 1481         RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");
 1482 
 1483         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1484                 return;
 1485 
 1486         sc->intr_pending_mask &= ~INT_RX_DONE;
 1487 
 1488         again = rt_rx_eof(sc, sc->rx_process_limit);
 1489 
 1490         RT_SOFTC_LOCK(sc);
 1491 
 1492         if ((sc->intr_pending_mask & INT_RX_DONE) || again) {
 1493                 RT_DPRINTF(sc, RT_DEBUG_RX,
 1494                     "Rx done task: scheduling again\n");
 1495                 taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
 1496         } else {
 1497                 rt_intr_enable(sc, INT_RX_DONE);
 1498         }
 1499 
 1500         RT_SOFTC_UNLOCK(sc);
 1501 }
 1502 
 1503 /*
  1504  * rt_tx_done_task - process pending Tx completions in all queues
 1505  */
 1506 static void
 1507 rt_tx_done_task(void *context, int pending)
 1508 {
 1509         struct rt_softc *sc;
 1510         struct ifnet *ifp;
 1511         uint32_t intr_mask;
 1512         int i;
 1513 
 1514         sc = context;
 1515         ifp = sc->ifp;
 1516 
 1517         RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");
 1518 
 1519         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1520                 return;
 1521 
 1522         for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
 1523                 if (sc->intr_pending_mask & (INT_TXQ0_DONE << i)) {
 1524                         sc->intr_pending_mask &= ~(INT_TXQ0_DONE << i);
 1525                         rt_tx_eof(sc, &sc->tx_ring[i]);
 1526                 }
 1527         }
 1528 
 1529         sc->tx_timer = 0;
 1530 
 1531         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1532 
 1533         intr_mask = (
 1534                 INT_TXQ3_DONE |
 1535                 INT_TXQ2_DONE |
 1536                 INT_TXQ1_DONE |
 1537                 INT_TXQ0_DONE);
 1538 
 1539         RT_SOFTC_LOCK(sc);
 1540 
 1541         rt_intr_enable(sc, ~sc->intr_pending_mask &
 1542             (sc->intr_disable_mask & intr_mask));
 1543 
 1544         if (sc->intr_pending_mask & intr_mask) {
 1545                 RT_DPRINTF(sc, RT_DEBUG_TX,
 1546                     "Tx done task: scheduling again\n");
 1547                 taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
 1548         }
 1549 
 1550         RT_SOFTC_UNLOCK(sc);
 1551 
 1552         if (!IFQ_IS_EMPTY(&ifp->if_snd))
 1553                 rt_start(ifp);
 1554 }
 1555 
 1556 /*
 1557  * rt_periodic_task - run periodic task
 1558  */
 1559 static void
 1560 rt_periodic_task(void *context, int pending)
 1561 {
 1562         struct rt_softc *sc;
 1563         struct ifnet *ifp;
 1564 
 1565         sc = context;
 1566         ifp = sc->ifp;
 1567 
 1568         RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
 1569             sc->periodic_round);
 1570 
 1571         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
 1572                 return;
 1573 
 1574         RT_SOFTC_LOCK(sc);
 1575         sc->periodic_round++;
 1576         rt_update_stats(sc);
 1577 
 1578         if ((sc->periodic_round % 10) == 0) {
 1579                 rt_update_raw_counters(sc);
 1580                 rt_watchdog(sc);
 1581         }
 1582 
 1583         RT_SOFTC_UNLOCK(sc);
 1584         callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
 1585 }
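
Cadence, sketched: rt_periodic() fires every hz/10 (about 100 ms) and
defers to this task, so rt_update_stats() runs roughly ten times per
second, while rt_update_raw_counters() and rt_watchdog() run about once
per second (every tenth round).
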
 1586 
 1587 /*
  1588  * rt_rx_eof - check for frames completed by the DMA engine and pass
  1589  * them to the network subsystem.
 1590  */
 1591 static int
 1592 rt_rx_eof(struct rt_softc *sc, int limit)
 1593 {
 1594         struct ifnet *ifp;
 1595         struct rt_softc_rx_ring *ring;
 1596         struct rt_rxdesc *desc;
 1597         struct rt_softc_rx_data *data;
 1598         struct mbuf *m, *mnew;
 1599         bus_dma_segment_t segs[1];
 1600         bus_dmamap_t dma_map;
 1601         uint32_t index, desc_flags;
 1602         int error, nsegs, len, nframes;
 1603 
 1604         ifp = sc->ifp;
 1605         ring = &sc->rx_ring;
 1606 
 1607         nframes = 0;
 1608 
 1609         while (limit != 0) {
 1610                 index = RT_READ(sc, PDMA_BASE + RX_DRX_IDX0);
 1611                 if (ring->cur == index)
 1612                         break;
 1613 
 1614                 desc = &ring->desc[ring->cur];
 1615                 data = &ring->data[ring->cur];
 1616 
 1617                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 1618                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1619 
 1620 #ifdef IF_RT_DEBUG
 1621                 if (sc->debug & RT_DEBUG_RX) {
 1622                         printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
 1623                         hexdump(desc, 16, 0, 0);
 1624                         printf("-----------------------------------\n");
 1625                 }
 1626 #endif
 1627 
 1628                 /* XXX Sometimes the device doesn't set the DDONE bit. */
 1629 #ifdef DDONE_FIXED
 1630                 if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
 1631                         RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
 1632                         break;
 1633                 }
 1634 #endif
 1635 
 1636                 len = le16toh(desc->sdl0) & 0x3fff;
 1637                 RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);
 1638 
 1639                 nframes++;
 1640 
 1641                 mnew = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
 1642                     MJUMPAGESIZE);
 1643                 if (mnew == NULL) {
 1644                         sc->rx_mbuf_alloc_errors++;
 1645                         ifp->if_ierrors++;
 1646                         goto skip;
 1647                 }
 1648 
 1649                 mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;
 1650 
 1651                 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
 1652                     ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
 1653                 if (error != 0) {
 1654                         RT_DPRINTF(sc, RT_DEBUG_RX,
 1655                             "could not load Rx mbuf DMA map: "
 1656                             "error=%d, nsegs=%d\n",
 1657                             error, nsegs);
 1658 
 1659                         m_freem(mnew);
 1660 
 1661                         sc->rx_mbuf_dmamap_errors++;
 1662                         ifp->if_ierrors++;
 1663 
 1664                         goto skip;
 1665                 }
 1666 
 1667                 KASSERT(nsegs == 1, ("%s: too many DMA segments",
 1668                         device_get_nameunit(sc->dev)));
 1669 
 1670                 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 1671                         BUS_DMASYNC_POSTREAD);
 1672                 bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 1673 
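                      /*
                       * Swap this slot's DMA map with the spare map the new
                       * mbuf was just loaded into; the old buffer's map then
                       * becomes the spare for the next replacement.
                       */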
 1674                 dma_map = data->dma_map;
 1675                 data->dma_map = ring->spare_dma_map;
 1676                 ring->spare_dma_map = dma_map;
 1677 
 1678                 bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 1679                         BUS_DMASYNC_PREREAD);
 1680 
 1681                 m = data->m;
 1682                 desc_flags = desc->src;
 1683 
 1684                 data->m = mnew;
 1685                 /* Offset by 2 bytes to align the RX IP header */
 1686                 desc->sdp0 = htole32(segs[0].ds_addr+2);
 1687                 desc->sdl0 = htole16(segs[0].ds_len-2);
 1688                 desc->src = 0;
 1689                 desc->ai = 0;
 1690                 desc->foe = 0;
 1691 
 1692                 RT_DPRINTF(sc, RT_DEBUG_RX,
 1693                     "Rx frame: rxdesc flags=0x%08x\n", desc_flags);
 1694 
 1695                 m->m_pkthdr.rcvif = ifp;
 1696                 /* Skip the 2-byte alignment pad (sdp0 was set to addr + 2) */
 1697                 m->m_data += 2;
 1698                 m->m_pkthdr.len = m->m_len = len;
 1699 
 1700                 /* check for checksum errors */
 1701                 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
 1702                         /* check for valid checksum */
 1703                         if (desc_flags & (RXDSXR_SRC_IP_CSUM_FAIL|
 1704                             RXDSXR_SRC_L4_CSUM_FAIL)) {
 1705                                 RT_DPRINTF(sc, RT_DEBUG_RX,
 1706                                     "rxdesc: checksum error\n");
 1707 
 1708                                 ifp->if_ierrors++;
 1709 
 1710                                 if (!(ifp->if_flags & IFF_PROMISC)) {
 1711                                     m_freem(m);
 1712                                     goto skip;
 1713                                 }
 1714                         }
 1715                         if ((desc_flags & RXDSXR_SRC_IP_CSUM_FAIL) == 0) {
 1716                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
 1717                                 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
 1718                                 m->m_pkthdr.csum_data = 0xffff;
 1719                         }
 1720                         m->m_flags &= ~M_HASFCS;
 1721                 }
 1722 
 1723                 (*ifp->if_input)(ifp, m);
 1724 skip:
 1725                 desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
 1726 
 1727                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 1728                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1729 
 1730                 ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;
 1731 
 1732                 limit--;
 1733         }
 1734 
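              /*
               * Tell the DMA engine the last descriptor the CPU has finished
               * with: RX_CALC_IDX0 trails ring->cur by one and wraps at the
               * ring boundary.
               */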
 1735         if (ring->cur == 0)
 1736                 RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
 1737                         RT_SOFTC_RX_RING_DATA_COUNT - 1);
 1738         else
 1739                 RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
 1740                         ring->cur - 1);
 1741 
 1742         RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);
 1743 
 1744         sc->rx_packets += nframes;
 1745 
 1746         return (limit == 0);
 1747 }
 1748 
 1749 /*
 1750  * rt_tx_eof - check for successfully transmitted frames and mark their
 1751  * descriptors as free.
 1752  */
 1753 static void
 1754 rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
 1755 {
 1756         struct ifnet *ifp;
 1757         struct rt_txdesc *desc;
 1758         struct rt_softc_tx_data *data;
 1759         uint32_t index;
 1760         int ndescs, nframes;
 1761 
 1762         ifp = sc->ifp;
 1763 
 1764         ndescs = 0;
 1765         nframes = 0;
 1766 
 1767         for (;;) {
 1768                 index = RT_READ(sc, PDMA_BASE + TX_DTX_IDX(ring->qid));
 1769                 if (ring->desc_next == index)
 1770                         break;
 1771 
 1772                 ndescs++;
 1773 
 1774                 desc = &ring->desc[ring->desc_next];
 1775 
 1776                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 1777                         BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 1778 
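                      /*
                       * A frame may span several descriptors; only the one
                       * marked as the last segment has an mbuf to free and
                       * counts as a completed frame.
                       */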
 1779                 if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
 1780                         desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
 1781                         nframes++;
 1782 
 1783                         data = &ring->data[ring->data_next];
 1784 
 1785                         bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 1786                                 BUS_DMASYNC_POSTWRITE);
 1787                         bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 1788 
 1789                         m_freem(data->m);
 1790 
 1791                         data->m = NULL;
 1792 
 1793                         ifp->if_opackets++;
 1794 
 1795                         RT_SOFTC_TX_RING_LOCK(ring);
 1796                         ring->data_queued--;
 1797                         ring->data_next = (ring->data_next + 1) %
 1798                             RT_SOFTC_TX_RING_DATA_COUNT;
 1799                         RT_SOFTC_TX_RING_UNLOCK(ring);
 1800                 }
 1801 
 1802                 desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);
 1803 
 1804                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 1805                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1806 
 1807                 RT_SOFTC_TX_RING_LOCK(ring);
 1808                 ring->desc_queued--;
 1809                 ring->desc_next = (ring->desc_next + 1) %
 1810                     RT_SOFTC_TX_RING_DESC_COUNT;
 1811                 RT_SOFTC_TX_RING_UNLOCK(ring);
 1812         }
 1813 
 1814         RT_DPRINTF(sc, RT_DEBUG_TX,
 1815             "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
 1816             nframes);
 1817 }
 1818 
 1819 /*
 1820  * rt_update_stats - query statistics counters and update related variables.
 1821  */
 1822 static void
 1823 rt_update_stats(struct rt_softc *sc)
 1824 {
 1825         struct ifnet *ifp;
 1826 
 1827         ifp = sc->ifp;
 1828         RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistics\n");
 1829         /* XXX do update stats here */
 1830 }
 1831 
 1832 /*
 1833  * rt_watchdog - check queue state on a watchdog event (reset disabled).
 1834  */
 1835 static void
 1836 rt_watchdog(struct rt_softc *sc)
 1837 {
 1838         uint32_t tmp;
 1839 #ifdef notyet
 1840         int ntries;
 1841 #endif
 1842 
 1843         tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);
 1844 
 1845         RT_DPRINTF(sc, RT_DEBUG_WATCHDOG, "watchdog: CDMA_OQ_STA=0x%08x\n",
 1846             tmp);
 1847 
 1848         /* XXX: do not reset */
 1849 #ifdef notyet
 1850         if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
 1851                 sc->tx_queue_not_empty[0]++;
 1852 
 1853                 for (ntries = 0; ntries < 10; ntries++) {
 1854                         tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
 1855                         if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
 1856                                 break;
 1857 
 1858                         DELAY(1);
 1859                 }
 1860         }
 1861 
 1862         if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
 1863                 sc->tx_queue_not_empty[1]++;
 1864 
 1865                 for (ntries = 0; ntries < 10; ntries++) {
 1866                         tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
 1867                         if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
 1868                                 break;
 1869 
 1870                         DELAY(1);
 1871                 }
 1872         }
 1873 #endif
 1874 }
 1875 
 1876 /*
 1877  * rt_update_raw_counters - accumulate hardware counters into the softc.
 1878  */
 1879 static void
 1880 rt_update_raw_counters(struct rt_softc *sc)
 1881 {
 1882 
 1883         sc->tx_bytes    += RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
 1884         sc->tx_packets  += RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
 1885         sc->tx_skip     += RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
 1886         sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
 1887 
 1888         sc->rx_bytes    += RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
 1889         sc->rx_packets  += RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
 1890         sc->rx_crc_err  += RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
 1891         sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
 1892         sc->rx_long_err += RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
 1893         sc->rx_phy_err  += RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
 1894         sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
 1895 }
 1896 
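      /*
       * rt_intr_enable - clear bits in the disable mask and program the
       * resulting enable set into FE_INT_ENABLE.
       */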
 1897 static void
 1898 rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
 1899 {
 1900         uint32_t tmp;
 1901 
 1902         sc->intr_disable_mask &= ~intr_mask;
 1903         tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
 1904         RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
 1905 }
 1906 
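      /*
       * rt_intr_disable - set bits in the disable mask and program the
       * resulting enable set into FE_INT_ENABLE.
       */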
 1907 static void
 1908 rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
 1909 {
 1910         uint32_t tmp;
 1911 
 1912         sc->intr_disable_mask |= intr_mask;
 1913         tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
 1914         RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
 1915 }
 1916 
 1917 /*
 1918  * rt_txrx_enable - enable TX/RX DMA
 1919  */
 1920 static int
 1921 rt_txrx_enable(struct rt_softc *sc)
 1922 {
 1923         struct ifnet *ifp;
 1924         uint32_t tmp;
 1925         int ntries;
 1926 
 1927         ifp = sc->ifp;
 1928 
 1929         /* wait for the Tx/Rx DMA engine to become idle */
 1930         for (ntries = 0; ntries < 200; ntries++) {
 1931                 tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
 1932                 if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
 1933                         break;
 1934 
 1935                 DELAY(1000);
 1936         }
 1937 
 1938         if (ntries == 200) {
 1939                 device_printf(sc->dev, "timeout waiting for DMA engine\n");
 1940                 return (-1);
 1941         }
 1942 
 1943         DELAY(50);
 1944 
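              /*
               * Enable the engine; FE_TX_WB_DDONE presumably makes the
               * hardware write the DDONE bit back to the in-memory
               * descriptor so the CPU can poll completion there.
               */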
 1945         tmp |= FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
 1946         RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
 1947 
 1948         /* XXX set Rx filter */
 1949         return (0);
 1950 }
 1951 
 1952 /*
 1953  * rt_alloc_rx_ring - allocate RX DMA ring buffer
 1954  */
 1955 static int
 1956 rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
 1957 {
 1958         struct rt_rxdesc *desc;
 1959         struct rt_softc_rx_data *data;
 1960         bus_dma_segment_t segs[1];
 1961         int i, nsegs, error;
 1962 
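              /*
               * One tag/map pair covers the contiguous descriptor array; a
               * second tag provides a map per receive buffer plus a spare
               * map used when refilling the ring in rt_rx_eof().
               */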
 1963         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
 1964                 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1965                 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
 1966                 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
 1967                 0, NULL, NULL, &ring->desc_dma_tag);
 1968         if (error != 0) {
 1969                 device_printf(sc->dev,
 1970                     "could not create Rx desc DMA tag\n");
 1971                 goto fail;
 1972         }
 1973 
 1974         error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
 1975             BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
 1976         if (error != 0) {
 1977                 device_printf(sc->dev,
 1978                     "could not allocate Rx desc DMA memory\n");
 1979                 goto fail;
 1980         }
 1981 
 1982         error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
 1983                 ring->desc,
 1984                 RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
 1985                 rt_dma_map_addr, &ring->desc_phys_addr, 0);
 1986         if (error != 0) {
 1987                 device_printf(sc->dev, "could not load Rx desc DMA map\n");
 1988                 goto fail;
 1989         }
 1990 
 1991         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
 1992             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 1993                 MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
 1994                 &ring->data_dma_tag);
 1995         if (error != 0) {
 1996                 device_printf(sc->dev,
 1997                     "could not create Rx data DMA tag\n");
 1998                 goto fail;
 1999         }
 2000 
 2001         for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
 2002                 desc = &ring->desc[i];
 2003                 data = &ring->data[i];
 2004 
 2005                 error = bus_dmamap_create(ring->data_dma_tag, 0,
 2006                     &data->dma_map);
 2007                 if (error != 0) {
 2008                         device_printf(sc->dev, "could not create Rx data DMA "
 2009                             "map\n");
 2010                         goto fail;
 2011                 }
 2012 
 2013                 data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
 2014                     MJUMPAGESIZE);
 2015                 if (data->m == NULL) {
 2016                         device_printf(sc->dev, "could not allocate Rx mbuf\n");
 2017                         error = ENOMEM;
 2018                         goto fail;
 2019                 }
 2020 
 2021                 data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
 2022 
 2023                 error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
 2024                     data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
 2025                 if (error != 0) {
 2026                         device_printf(sc->dev,
 2027                             "could not load Rx mbuf DMA map\n");
 2028                         goto fail;
 2029                 }
 2030 
 2031                 KASSERT(nsegs == 1, ("%s: too many DMA segments",
 2032                         device_get_nameunit(sc->dev)));
 2033 
 2034                 /* Offset by 2 bytes to align the RX IP header */
 2035                 desc->sdp0 = htole32(segs[0].ds_addr+2);
 2036                 desc->sdl0 = htole16(segs[0].ds_len-2);
 2037         }
 2038 
 2039         error = bus_dmamap_create(ring->data_dma_tag, 0,
 2040             &ring->spare_dma_map);
 2041         if (error != 0) {
 2042                 device_printf(sc->dev,
 2043                     "could not create Rx spare DMA map\n");
 2044                 goto fail;
 2045         }
 2046 
 2047         bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2048                 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2049         return (0);
 2050 
 2051 fail:
 2052         rt_free_rx_ring(sc, ring);
 2053         return (error);
 2054 }
 2055 
 2056 /*
 2057  * rt_reset_rx_ring - reset RX ring buffer
 2058  */
 2059 static void
 2060 rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
 2061 {
 2062         struct rt_rxdesc *desc;
 2063         int i;
 2064 
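              /*
               * Clearing DDONE hands each descriptor back to the DMA engine,
               * so the ring restarts empty from index zero.
               */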
 2065         for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
 2066                 desc = &ring->desc[i];
 2067                 desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
 2068         }
 2069 
 2070         bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2071                 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 2072         ring->cur = 0;
 2073 }
 2074 
 2075 /*
 2076  * rt_free_rx_ring - free memory used by RX ring buffer
 2077  */
 2078 static void
 2079 rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
 2080 {
 2081         struct rt_softc_rx_data *data;
 2082         int i;
 2083 
 2084         if (ring->desc != NULL) {
 2085                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2086                         BUS_DMASYNC_POSTWRITE);
 2087                 bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
 2088                 bus_dmamem_free(ring->desc_dma_tag, ring->desc,
 2089                         ring->desc_dma_map);
 2090         }
 2091 
 2092         if (ring->desc_dma_tag != NULL)
 2093                 bus_dma_tag_destroy(ring->desc_dma_tag);
 2094 
 2095         for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
 2096                 data = &ring->data[i];
 2097 
 2098                 if (data->m != NULL) {
 2099                         bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 2100                                 BUS_DMASYNC_POSTREAD);
 2101                         bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 2102                         m_freem(data->m);
 2103                 }
 2104 
 2105                 if (data->dma_map != NULL)
 2106                         bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
 2107         }
 2108 
 2109         if (ring->spare_dma_map != NULL)
 2110                 bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
 2111 
 2112         if (ring->data_dma_tag != NULL)
 2113                 bus_dma_tag_destroy(ring->data_dma_tag);
 2114 }
 2115 
 2116 /*
 2117  * rt_alloc_tx_ring - allocate TX ring buffer
 2118  */
 2119 static int
 2120 rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
 2121 {
 2122         struct rt_softc_tx_data *data;
 2123         int error, i;
 2124 
 2125         mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
 2126 
 2127         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
 2128                 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2129                 RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
 2130                 RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
 2131                 0, NULL, NULL, &ring->desc_dma_tag);
 2132         if (error != 0) {
 2133                 device_printf(sc->dev,
 2134                     "could not create Tx desc DMA tag\n");
 2135                 goto fail;
 2136         }
 2137 
 2138         error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
 2139             BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
 2140         if (error != 0) {
 2141                 device_printf(sc->dev,
 2142                     "could not allocate Tx desc DMA memory\n");
 2143                 goto fail;
 2144         }
 2145 
 2146         error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
 2147             ring->desc, (RT_SOFTC_TX_RING_DESC_COUNT *
 2148             sizeof(struct rt_txdesc)), rt_dma_map_addr,
 2149             &ring->desc_phys_addr, 0);
 2150         if (error != 0) {
 2151                 device_printf(sc->dev, "could not load Tx desc DMA map\n");
 2152                 goto fail;
 2153         }
 2154 
 2155         ring->desc_queued = 0;
 2156         ring->desc_cur = 0;
 2157         ring->desc_next = 0;
 2158 
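              /*
               * The seg0 area reserves RT_TX_DATA_SEG0_SIZE bytes per packet;
               * it appears to act as a staging buffer for the first TX
               * segment so small headers need not be mapped separately.
               */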
 2159         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
 2160             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2161             RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
 2162             RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
 2163             0, NULL, NULL, &ring->seg0_dma_tag);
 2164         if (error != 0) {
 2165                 device_printf(sc->dev,
 2166                     "could not create Tx seg0 DMA tag\n");
 2167                 goto fail;
 2168         }
 2169 
 2170         error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
 2171             BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
 2172         if (error != 0) {
 2173                 device_printf(sc->dev,
 2174                     "could not allocate Tx seg0 DMA memory\n");
 2175                 goto fail;
 2176         }
 2177 
 2178         error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
 2179             ring->seg0,
 2180             RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
 2181             rt_dma_map_addr, &ring->seg0_phys_addr, 0);
 2182         if (error != 0) {
 2183                 device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
 2184                 goto fail;
 2185         }
 2186 
 2187         error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
 2188             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 2189             MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
 2190             &ring->data_dma_tag);
 2191         if (error != 0) {
 2192                 device_printf(sc->dev,
 2193                     "could not create Tx data DMA tag\n");
 2194                 goto fail;
 2195         }
 2196 
 2197         for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
 2198                 data = &ring->data[i];
 2199 
 2200                 error = bus_dmamap_create(ring->data_dma_tag, 0,
 2201                     &data->dma_map);
 2202                 if (error != 0) {
 2203                         device_printf(sc->dev, "could not create Tx data DMA "
 2204                             "map\n");
 2205                         goto fail;
 2206                 }
 2207         }
 2208 
 2209         ring->data_queued = 0;
 2210         ring->data_cur = 0;
 2211         ring->data_next = 0;
 2212 
 2213         ring->qid = qid;
 2214         return (0);
 2215 
 2216 fail:
 2217         rt_free_tx_ring(sc, ring);
 2218         return (error);
 2219 }
 2220 
 2221 /*
 2222  * rt_reset_tx_ring - reset TX ring buffer to empty state
 2223  */
 2224 static void
 2225 rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
 2226 {
 2227         struct rt_softc_tx_data *data;
 2228         struct rt_txdesc *desc;
 2229         int i;
 2230 
 2231         for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
 2232                 desc = &ring->desc[i];
 2233 
 2234                 desc->sdl0 = 0;
 2235                 desc->sdl1 = 0;
 2236         }
 2237 
 2238         ring->desc_queued = 0;
 2239         ring->desc_cur = 0;
 2240         ring->desc_next = 0;
 2241 
 2242         bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2243                 BUS_DMASYNC_PREWRITE);
 2244 
 2245         bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
 2246                 BUS_DMASYNC_PREWRITE);
 2247 
 2248         for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
 2249                 data = &ring->data[i];
 2250 
 2251                 if (data->m != NULL) {
 2252                         bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 2253                                 BUS_DMASYNC_POSTWRITE);
 2254                         bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 2255                         m_freem(data->m);
 2256                         data->m = NULL;
 2257                 }
 2258         }
 2259 
 2260         ring->data_queued = 0;
 2261         ring->data_cur = 0;
 2262         ring->data_next = 0;
 2263 }
 2264 
 2265 /*
 2266  * rt_free_tx_ring - free memory used by TX ring buffer
 2267  */
 2268 static void
 2269 rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
 2270 {
 2271         struct rt_softc_tx_data *data;
 2272         int i;
 2273 
 2274         if (ring->desc != NULL) {
 2275                 bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
 2276                         BUS_DMASYNC_POSTWRITE);
 2277                 bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
 2278                 bus_dmamem_free(ring->desc_dma_tag, ring->desc,
 2279                         ring->desc_dma_map);
 2280         }
 2281 
 2282         if (ring->desc_dma_tag != NULL)
 2283                 bus_dma_tag_destroy(ring->desc_dma_tag);
 2284 
 2285         if (ring->seg0 != NULL) {
 2286                 bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
 2287                         BUS_DMASYNC_POSTWRITE);
 2288                 bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
 2289                 bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
 2290                         ring->seg0_dma_map);
 2291         }
 2292 
 2293         if (ring->seg0_dma_tag != NULL)
 2294                 bus_dma_tag_destroy(ring->seg0_dma_tag);
 2295 
 2296         for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
 2297                 data = &ring->data[i];
 2298 
 2299                 if (data->m != NULL) {
 2300                         bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
 2301                                 BUS_DMASYNC_POSTWRITE);
 2302                         bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
 2303                         m_freem(data->m);
 2304                 }
 2305 
 2306                 if (data->dma_map != NULL)
 2307                         bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
 2308         }
 2309 
 2310         if (ring->data_dma_tag != NULL)
 2311                 bus_dma_tag_destroy(ring->data_dma_tag);
 2312 
 2313         mtx_destroy(&ring->lock);
 2314 }
 2315 
 2316 /*
 2317  * rt_dma_map_addr - get address of busdma segment
 2318  */
 2319 static void
 2320 rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 2321 {
 2322         if (error != 0)
 2323                 return;
 2324 
 2325         KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
 2326 
 2327         *(bus_addr_t *) arg = segs[0].ds_addr;
 2328 }
 2329 
 2330 /*
 2331  * rt_sysctl_attach - attach sysctl nodes for NIC counters.
 2332  */
 2333 static void
 2334 rt_sysctl_attach(struct rt_softc *sc)
 2335 {
 2336         struct sysctl_ctx_list *ctx;
 2337         struct sysctl_oid *tree;
 2338         struct sysctl_oid *stats;
 2339 
 2340         ctx = device_get_sysctl_ctx(sc->dev);
 2341         tree = device_get_sysctl_tree(sc->dev);
 2342 
 2343         /* statistics counters, exported read-only as dev.rt.<unit>.stats.* */
 2344         stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
 2345             "stats", CTLFLAG_RD, 0, "statistic");
 2346 
 2347         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2348             "interrupts", CTLFLAG_RD, &sc->interrupts, 0,
 2349             "all interrupts");
 2350 
 2351         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2352             "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
 2353             0, "Tx coherent interrupts");
 2354 
 2355         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2356             "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
 2357             0, "Rx coherent interrupts");
 2358 
 2359         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2360             "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts, 0,
 2361             "Rx interrupts");
 2362 
 2363         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2364             "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts, 0,
 2365             "Rx delay interrupts");
 2366 
 2367         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2368             "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3], 0,
 2369             "Tx AC3 interrupts");
 2370 
 2371         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2372             "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2], 0,
 2373             "Tx AC2 interrupts");
 2374 
 2375         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2376             "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1], 0,
 2377             "Tx AC1 interrupts");
 2378 
 2379         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2380             "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0], 0,
 2381             "Tx AC0 interrupts");
 2382 
 2383         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2384             "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
 2385             0, "Tx delay interrupts");
 2386 
 2387         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2388             "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
 2389             0, "Tx AC3 descriptors queued");
 2390 
 2391         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2392             "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
 2393             0, "Tx AC3 data queued");
 2394 
 2395         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2396             "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
 2397             0, "Tx AC2 descriptors queued");
 2398 
 2399         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2400             "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
 2401             0, "Tx AC2 data queued");
 2402 
 2403         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2404             "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
 2405             0, "Tx AC1 descriptors queued");
 2406 
 2407         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2408             "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
 2409             0, "Tx AC1 data queued");
 2410 
 2411         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2412             "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
 2413             0, "Tx AC0 descriptors queued");
 2414 
 2415         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2416             "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
 2417             0, "Tx AC0 data queued");
 2418 
 2419         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2420             "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
 2421             0, "Tx AC3 data queue full");
 2422 
 2423         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2424             "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
 2425             0, "Tx AC2 data queue full");
 2426 
 2427         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2428             "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
 2429             0, "Tx AC1 data queue full");
 2430 
 2431         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2432             "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
 2433             0, "Tx AC0 data queue full");
 2434 
 2435         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2436             "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
 2437             0, "Tx watchdog timeouts");
 2438 
 2439         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2440             "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets, 0,
 2441             "Tx defragmented packets");
 2442 
 2443         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2444             "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail, 0,
 2445             "no Tx descriptors available");
 2446 
 2447         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2448             "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
 2449             0, "Rx mbuf allocation errors");
 2450 
 2451         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2452             "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
 2453             0, "Rx mbuf DMA mapping errors");
 2454 
 2455         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2456             "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
 2457             0, "Tx queue 0 not empty");
 2458 
 2459         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2460             "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
 2461             0, "Tx queue 1 not empty");
 2462 
 2463         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2464             "rx_packets", CTLFLAG_RD, &sc->rx_packets, 0,
 2465             "Rx packets");
 2466 
 2467         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2468             "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err, 0,
 2469             "Rx CRC errors");
 2470 
 2471         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2472             "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err, 0,
 2473             "Rx PHY errors");
 2474 
 2475         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2476             "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets, 0,
 2477             "Rx duplicate packets");
 2478 
 2479         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2480             "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows, 0,
 2481             "Rx FIFO overflows");
 2482 
 2483         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2484             "rx_bytes", CTLFLAG_RD, &sc->rx_bytes, 0,
 2485             "Rx bytes");
 2486 
 2487         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2488             "rx_long_err", CTLFLAG_RD, &sc->rx_long_err, 0,
 2489             "Rx too long frame errors");
 2490 
 2491         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2492             "rx_short_err", CTLFLAG_RD, &sc->rx_short_err, 0,
 2493             "Rx too short frame errors");
 2494 
 2495         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2496             "tx_bytes", CTLFLAG_RD, &sc->tx_bytes, 0,
 2497             "Tx bytes");
 2498         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2499             "tx_packets", CTLFLAG_RD, &sc->tx_packets, 0,
 2500             "Tx packets");
 2501         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2502             "tx_skip", CTLFLAG_RD, &sc->tx_skip, 0,
 2503             "Tx skip count for GDMA ports");
 2504         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
 2505             "tx_collision", CTLFLAG_RD, &sc->tx_collision, 0,
 2506             "Tx collision count for GDMA ports");
 2507 }
 2508 
 2509 #ifdef IF_RT_PHY_SUPPORT
 2510 static int
 2511 rt_miibus_readreg(device_t dev, int phy, int reg)
 2512 {
 2513         struct rt_softc *sc = device_get_softc(dev);
 2514 
 2515         /*
 2516          * PSEUDO_PHYAD is a special value indicating that a switch is
 2517          * attached; no real PHY uses the PSEUDO_PHYAD (0x1e) address.
 2518          */
 2519         if (phy == 31) {
 2520                 /* Fake PHY ID for bfeswitch attach */
 2521                 switch (reg) {
 2522                 case MII_BMSR:
 2523                         return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
 2524                 case MII_PHYIDR1:               /* Fake the vendor and */
 2525                         return (0x40);          /* device ID so the PHY */
 2526                 case MII_PHYIDR2:               /* layer detects the */
 2527                         return (0x6250);        /* device as a bfeswitch. */
 2528                 }
 2529         }
 2530 
 2531         /* Wait for any previous command to complete */
 2532         while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
 2533         RT_WRITE(sc, MDIO_ACCESS,
 2534             MDIO_CMD_ONGO |
 2535             ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
 2536             ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
 2537         while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
 2538 
 2539         return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
 2540 }
 2541 
 2542 static int
 2543 rt_miibus_writereg(device_t dev, int phy, int reg, int val)
 2544 {
 2545         struct rt_softc *sc = device_get_softc(dev);
 2546 
 2547         /* Wait for any previous command to complete */
 2548         while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
 2549         RT_WRITE(sc, MDIO_ACCESS,
 2550             MDIO_CMD_ONGO | MDIO_CMD_WR |
 2551             ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
 2552             ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
 2553             (val & MDIO_PHY_DATA_MASK));
 2554         while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
 2555 
 2556         return (0);
 2557 }
 2558 
 2559 void
 2560 rt_miibus_statchg(device_t dev)
 2561 {
 2562         struct rt_softc *sc = device_get_softc(dev);
 2563         struct mii_data *mii;
 2564 
 2565         mii = device_get_softc(sc->rt_miibus);
 2566 
 2567         if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
 2568             (IFM_ACTIVE | IFM_AVALID)) {
 2569                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
 2570                 case IFM_10_T:
 2571                 case IFM_100_TX:
 2572                         /* XXX check link here */
 2573                         sc->flags |= 1;
 2574                         break;
 2575                 default:
 2576                         break;
 2577                 }
 2578         }
 2579 }
 2580 #endif /* IF_RT_PHY_SUPPORT */
 2581 
 2582 static device_method_t rt_dev_methods[] =
 2583 {
 2584         DEVMETHOD(device_probe, rt_probe),
 2585         DEVMETHOD(device_attach, rt_attach),
 2586         DEVMETHOD(device_detach, rt_detach),
 2587         DEVMETHOD(device_shutdown, rt_shutdown),
 2588         DEVMETHOD(device_suspend, rt_suspend),
 2589         DEVMETHOD(device_resume, rt_resume),
 2590 
 2591         /* bus interface */
 2592         DEVMETHOD(bus_print_child,      bus_generic_print_child),
 2593         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
 2594 
 2595 #ifdef IF_RT_PHY_SUPPORT
 2596         /* MII interface */
 2597         DEVMETHOD(miibus_readreg,       rt_miibus_readreg),
 2598         DEVMETHOD(miibus_writereg,      rt_miibus_writereg),
 2599         DEVMETHOD(miibus_statchg,       rt_miibus_statchg),
 2600 #endif
 2601         { 0, 0 }
 2602 };
 2603 
 2604 static driver_t rt_driver =
 2605 {
 2606         "rt",
 2607         rt_dev_methods,
 2608         sizeof(struct rt_softc)
 2609 };
 2610 
 2611 static devclass_t rt_dev_class;
 2612 
 2613 DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
 2614 MODULE_DEPEND(rt, ether, 1, 1, 1);
 2615 MODULE_DEPEND(rt, miibus, 1, 1, 1);
 2616 