FreeBSD/Linux Kernel Cross Reference
sys/arm/at91/if_ate.c


    1 /*-
    2  * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  *
   13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   18  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   19  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   20  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   22  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   23  */
   24 
    25 /* TODO (in no particular order):
    26  *
    27  * 1) detach
    28  * 2) Free dma setup
    29  * 3) Turn on the clock in pmc?  Turn off?
    30  *
    31  * 8) Need to sync busdma goo in atestop
    32  * 9) atestop should maybe free the mbufs?
    33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD$");
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/bus.h>
   41 #include <sys/kernel.h>
   42 #include <sys/mbuf.h>
   43 #include <sys/malloc.h>
   44 #include <sys/module.h>
   45 #include <sys/rman.h>
   46 #include <sys/socket.h>
   47 #include <sys/sockio.h>
   48 #include <sys/sysctl.h>
   49 #include <machine/bus.h>
   50 
   51 #include <net/ethernet.h>
   52 #include <net/if.h>
   53 #include <net/if_arp.h>
   54 #include <net/if_dl.h>
   55 #include <net/if_media.h>
   56 #include <net/if_mib.h>
   57 #include <net/if_types.h>
   58 
   59 #ifdef INET
   60 #include <netinet/in.h>
   61 #include <netinet/in_systm.h>
   62 #include <netinet/in_var.h>
   63 #include <netinet/ip.h>
   64 #endif
   65 
   66 #include <net/bpf.h>
   67 #include <net/bpfdesc.h>
   68 
   69 #include <dev/mii/mii.h>
   70 #include <dev/mii/miivar.h>
   71 #include <arm/at91/if_atereg.h>
   72 
   73 #include "miibus_if.h"
   74 
   75 #define ATE_MAX_TX_BUFFERS 2            /* We have ping-pong tx buffers */
   76 #define ATE_MAX_RX_BUFFERS 64
   77 
   78 struct ate_softc
   79 {
   80         struct ifnet *ifp;              /* ifnet pointer */
   81         struct mtx sc_mtx;              /* basically a perimeter lock */
   82         device_t dev;                   /* Myself */
   83         device_t miibus;                /* My child miibus */
   84         void *intrhand;                 /* Interrupt handle */
   85         struct resource *irq_res;       /* IRQ resource */
   86         struct resource *mem_res;       /* Memory resource */
   87         struct callout tick_ch;         /* Tick callout */
   88         bus_dma_tag_t mtag;             /* bus dma tag for mbufs */
   89         bus_dmamap_t tx_map[ATE_MAX_TX_BUFFERS];
   90         struct mbuf *sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
   91         bus_dma_tag_t rxtag;
   92         bus_dmamap_t rx_map[ATE_MAX_RX_BUFFERS];
   93         void *rx_buf[ATE_MAX_RX_BUFFERS]; /* RX buffer space */
   94         int rx_buf_ptr;
   95         bus_dma_tag_t rx_desc_tag;
   96         bus_dmamap_t rx_desc_map;
   97         int txcur;                      /* current tx map pointer */
   98         bus_addr_t rx_desc_phys;
   99         eth_rx_desc_t *rx_descs;
  100         int use_rmii;
  101         struct  ifmib_iso_8802_3 mibdata; /* stuff for network mgmt */
  102 };
  103 
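       /*
        * Register accessors: 32-bit reads and writes of the EMAC's
        * memory-mapped registers through the mem_res resource.
        */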
  104 static inline uint32_t
  105 RD4(struct ate_softc *sc, bus_size_t off)
  106 {
   107         return (bus_read_4(sc->mem_res, off));
  108 }
  109 
  110 static inline void
  111 WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
  112 {
  113         bus_write_4(sc->mem_res, off, val);
  114 }
  115 
  116 #define ATE_LOCK(_sc)           mtx_lock(&(_sc)->sc_mtx)
  117 #define ATE_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_mtx)
  118 #define ATE_LOCK_INIT(_sc) \
  119         mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
  120             MTX_NETWORK_LOCK, MTX_DEF)
  121 #define ATE_LOCK_DESTROY(_sc)   mtx_destroy(&_sc->sc_mtx);
  122 #define ATE_ASSERT_LOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_OWNED);
  123 #define ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
  124 
  125 static devclass_t ate_devclass;
  126 
  127 /* ifnet entry points */
  128 
  129 static void ateinit_locked(void *);
  130 static void atestart_locked(struct ifnet *);
  131 
  132 static void ateinit(void *);
  133 static void atestart(struct ifnet *);
  134 static void atestop(struct ate_softc *);
   135 static int ateioctl(struct ifnet *, u_long, caddr_t);
  136 
  137 /* bus entry points */
  138 
  139 static int ate_probe(device_t dev);
  140 static int ate_attach(device_t dev);
  141 static int ate_detach(device_t dev);
  142 static void ate_intr(void *);
  143 
  144 /* helper routines */
  145 static int ate_activate(device_t dev);
  146 static void ate_deactivate(device_t dev);
  147 static int ate_ifmedia_upd(struct ifnet *ifp);
  148 static void ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
  149 static int ate_get_mac(struct ate_softc *sc, u_char *eaddr);
  150 static void ate_set_mac(struct ate_softc *sc, u_char *eaddr);
  151 
   152 /*
   153  * The AT91 family of SoCs includes an Ethernet controller called the
   154  * EMAC.  However, it isn't self-identifying, so the parent bus code is
   155  * expected to add ate devices only where they really exist.  As such,
   156  * we do nothing here to identify the device and just set its description.
   157  */
  158 static int
  159 ate_probe(device_t dev)
  160 {
  161         device_set_desc(dev, "EMAC");
  162         return (0);
  163 }
  164 
  165 static int
  166 ate_attach(device_t dev)
  167 {
  168         struct ate_softc *sc = device_get_softc(dev);
  169         struct ifnet *ifp = NULL;
  170         struct sysctl_ctx_list *sctx;
  171         struct sysctl_oid *soid;
  172         int err;
  173         u_char eaddr[6];
  174 
  175         sc->dev = dev;
  176         err = ate_activate(dev);
  177         if (err)
  178                 goto out;
  179 
  180         sc->use_rmii = (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;
  181 
   182         /* Sysctls */
  183         sctx = device_get_sysctl_ctx(dev);
  184         soid = device_get_sysctl_tree(dev);
  185         SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
  186             CTLFLAG_RD, &sc->use_rmii, 0, "rmii in use");
  187 
   188         /* Calling atestop() before ifp is set is OK. */
  189         atestop(sc);
  190         ATE_LOCK_INIT(sc);
  191         callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
  192 
  193         if ((err = ate_get_mac(sc, eaddr)) != 0) {
  194                 device_printf(dev, "No MAC address set\n");
  195                 goto out;
  196         }
  197         ate_set_mac(sc, eaddr);
  198 
  199         sc->ifp = ifp = if_alloc(IFT_ETHER);
  200         if (mii_phy_probe(dev, &sc->miibus, ate_ifmedia_upd, ate_ifmedia_sts)) {
  201                 device_printf(dev, "Cannot find my PHY.\n");
  202                 err = ENXIO;
  203                 goto out;
  204         }
  205 
  206         ifp->if_softc = sc;
  207         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  208         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  209         ifp->if_capabilities |= IFCAP_VLAN_MTU;
  210         ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */
  211         ifp->if_start = atestart;
  212         ifp->if_ioctl = ateioctl;
  213         ifp->if_init = ateinit;
  214         ifp->if_baudrate = 10000000;
  215         IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
  216         ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
  217         IFQ_SET_READY(&ifp->if_snd);
  218         ifp->if_timer = 0;
  219         ifp->if_linkmib = &sc->mibdata;
  220         ifp->if_linkmiblen = sizeof(sc->mibdata);
  221         sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
  222 
  223         ether_ifattach(ifp, eaddr);
  224 
  225         /*
  226          * Activate the interrupt
  227          */
  228         err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
  229             NULL, ate_intr, sc, &sc->intrhand);
  230         if (err) {
  231                 ether_ifdetach(ifp);
  232                 ATE_LOCK_DESTROY(sc);
  233         }
  234 out:;
  235         if (err)
  236                 ate_deactivate(dev);
  237         if (err && ifp)
  238                 if_free(ifp);
  239         return (err);
  240 }
  241 
  242 static int
  243 ate_detach(device_t dev)
  244 {
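               /*
                * XXX TODO(1): detach is not implemented yet.  A rough,
                * untested sketch of the steps a full detach would need,
                * based on what ate_attach() and ate_activate() set up, is
                * kept under "#if 0" below, in the same spirit as the
                * fxp-derived sketch in ate_deactivate().
                */
       #if 0
               struct ate_softc *sc = device_get_softc(dev);

               atestop(sc);                    /* Quiesce the hardware. */
               callout_drain(&sc->tick_ch);    /* Stop the tick callout. */
               ether_ifdetach(sc->ifp);        /* Unhook from the network stack. */
               ate_deactivate(dev);            /* Tear down intr, miibus, resources. */
               if_free(sc->ifp);
               ATE_LOCK_DESTROY(sc);
               return (0);
       #endif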
   245         return (EBUSY); /* XXX TODO(1) */
  246 }
  247 
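       /*
        * Busdma load callback: record the bus address of the RX descriptor
        * ring in sc->rx_desc_phys so it can later be written to ETH_RBQP.
        */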
  248 static void
  249 ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  250 {
  251         struct ate_softc *sc;
  252 
  253         if (error != 0)
  254                 return;
  255         sc = (struct ate_softc *)arg;
  256         sc->rx_desc_phys = segs[0].ds_addr;
  257 }
  258 
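       /*
        * Busdma load callback for a single receive buffer: store the
        * buffer's bus address in the descriptor slot selected by
        * sc->rx_buf_ptr, setting the wrap bit on the last one.
        */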
  259 static void
  260 ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  261 {
  262         struct ate_softc *sc;
  263         int i;
  264 
  265         if (error != 0)
  266                 return;
  267         sc = (struct ate_softc *)arg;
  268         i = sc->rx_buf_ptr;
  269 
  270         /*
  271          * For the last buffer, set the wrap bit so the controller
  272          * restarts from the first descriptor.
  273          */
  274         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
  275         if (i == ATE_MAX_RX_BUFFERS - 1)
  276                 sc->rx_descs[i].addr = segs[0].ds_addr | ETH_WRAP_BIT;
  277         else
  278                 sc->rx_descs[i].addr = segs[0].ds_addr;
  279         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
  280         sc->rx_descs[i].status = 0;
  281         /* Flush the memory in the mbuf */
  282         bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD);
  283 }
  284 
   285 /*
   286  * Compute the multicast filter for this device using the standard
   287  * hash algorithm.  I wonder why this isn't in the ether code somewhere,
   288  * as a lot of different MAC chips use this method (or the reversed-bits
   289  * variant of it).
   290  */
  291 static void
  292 ate_setmcast(struct ate_softc *sc)
  293 {
  294         uint32_t index;
  295         uint32_t mcaf[2];
  296         u_char *af = (u_char *) mcaf;
  297         struct ifmultiaddr *ifma;
  298 
  299         mcaf[0] = 0;
  300         mcaf[1] = 0;
  301 
  302         IF_ADDR_LOCK(sc->ifp);
  303         TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
  304                 if (ifma->ifma_addr->sa_family != AF_LINK)
  305                         continue;
  306                 index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  307                     ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
  308                 af[index >> 3] |= 1 << (index & 7);
  309         }
  310         IF_ADDR_UNLOCK(sc->ifp);
  311 
   312         /*
   313          * Write the hash to the hash register.  Using this register, the
   314          * card can accept unicast as well as multicast packets, which makes
   315          * bridging easier, but we don't take advantage of that.  Locks here
   316          * are to avoid a LOR with the IF_ADDR_LOCK, but might not be
   317          * strictly necessary.
   318          */
  319         WR4(sc, ETH_HSL, mcaf[0]);
  320         WR4(sc, ETH_HSH, mcaf[1]);
  321 }
  322 
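       /*
        * Allocate the register and IRQ resources plus all of the busdma
        * tags, maps and RX buffers, then point the EMAC at the RX
        * descriptor ring.  On any failure, ate_deactivate() releases
        * whatever was set up and ENOMEM is returned.
        */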
  323 static int
  324 ate_activate(device_t dev)
  325 {
  326         struct ate_softc *sc;
  327         int rid, err, i;
  328 
  329         sc = device_get_softc(dev);
  330         rid = 0;
  331         sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
  332             RF_ACTIVE);
  333         if (sc->mem_res == NULL)
  334                 goto errout;
  335         rid = 0;
  336         sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
  337             RF_ACTIVE);
  338         if (sc->irq_res == NULL)
  339                 goto errout;
  340 
  341         /*
  342          * Allocate DMA tags and maps
  343          */
  344         err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  345             BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
  346             busdma_lock_mutex, &sc->sc_mtx, &sc->mtag);
  347         if (err != 0)
  348                 goto errout;
  349         for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
  350                 err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]);
  351                 if (err != 0)
  352                         goto errout;
  353         }
   354         /*
   355          * Allocate our RX buffers.  This chip has an RX descriptor ring
   356          * that gets filled in as each buffer is loaded below.
   357          */
   358 
   359         /*
   360          * Allocate DMA tags and maps for RX.
   361          */
  362         err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  363             BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
  364             busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag);
  365         if (err != 0)
  366                 goto errout;
  367 
  368         /* Dma TAG and MAP for the rx descriptors. */
  369         err = bus_dma_tag_create(NULL, sizeof(eth_rx_desc_t), 0, 
  370             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  371             ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1,
  372             ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
  373             &sc->sc_mtx, &sc->rx_desc_tag);
  374         if (err != 0)
  375                 goto errout;
  376         if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
  377             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
  378                 goto errout;
  379         if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
  380             sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t),
  381             ate_getaddr, sc, 0) != 0)
  382                 goto errout;
  383         /* XXX TODO(5) Put this in ateinit_locked? */
  384         for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
  385                 sc->rx_buf_ptr = i;
  386                 if (bus_dmamem_alloc(sc->rxtag, (void **)&sc->rx_buf[i],
  387                       BUS_DMA_NOWAIT, &sc->rx_map[i]) != 0)
  388                         goto errout;
  389                 if (bus_dmamap_load(sc->rxtag, sc->rx_map[i], sc->rx_buf[i],
  390                     MCLBYTES, ate_load_rx_buf, sc, 0) != 0)
  391                         goto errout;
  392         }
  393         sc->rx_buf_ptr = 0;
  394         /* Flush the memory for the EMAC rx descriptor */
  395         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
  396         /* Write the descriptor queue address. */
  397         WR4(sc, ETH_RBQP, sc->rx_desc_phys);
  398         return (0);
  399 errout:
  400         ate_deactivate(dev);
  401         return (ENOMEM);
  402 }
  403 
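       /*
        * Undo what ate_activate() and ate_attach() set up: tear down the
        * interrupt handler, detach and delete the miibus child, and release
        * the bus resources.  Releasing the busdma resources is still TODO(2).
        */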
  404 static void
  405 ate_deactivate(device_t dev)
  406 {
  407         struct ate_softc *sc;
  408 
  409         sc = device_get_softc(dev);
  410         /* XXX TODO(2) teardown busdma junk, below from fxp -- customize */
  411 #if 0
  412         if (sc->fxp_mtag) {
  413                 for (i = 0; i < FXP_NRFABUFS; i++) {
  414                         rxp = &sc->fxp_desc.rx_list[i];
  415                         if (rxp->rx_mbuf != NULL) {
  416                                 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
  417                                     BUS_DMASYNC_POSTREAD);
  418                                 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
  419                                 m_freem(rxp->rx_mbuf);
  420                         }
  421                         bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
  422                 }
  423                 bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
  424                 for (i = 0; i < FXP_NTXCB; i++) {
  425                         txp = &sc->fxp_desc.tx_list[i];
  426                         if (txp->tx_mbuf != NULL) {
  427                                 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
  428                                     BUS_DMASYNC_POSTWRITE);
  429                                 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
  430                                 m_freem(txp->tx_mbuf);
  431                         }
  432                         bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
  433                 }
  434                 bus_dma_tag_destroy(sc->fxp_mtag);
  435         }
  436         if (sc->fxp_stag)
  437                 bus_dma_tag_destroy(sc->fxp_stag);
  438         if (sc->cbl_tag)
  439                 bus_dma_tag_destroy(sc->cbl_tag);
  440         if (sc->mcs_tag)
  441                 bus_dma_tag_destroy(sc->mcs_tag);
  442 #endif
  443         if (sc->intrhand)
  444                 bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
  445         sc->intrhand = 0;
  446         bus_generic_detach(sc->dev);
  447         if (sc->miibus)
  448                 device_delete_child(sc->dev, sc->miibus);
  449         if (sc->mem_res)
   450                 bus_release_resource(dev, SYS_RES_MEMORY,
  451                     rman_get_rid(sc->mem_res), sc->mem_res);
  452         sc->mem_res = 0;
  453         if (sc->irq_res)
  454                 bus_release_resource(dev, SYS_RES_IRQ,
  455                     rman_get_rid(sc->irq_res), sc->irq_res);
  456         sc->irq_res = 0;
  457         return;
  458 }
  459 
  460 /*
  461  * Change media according to request.
  462  */
  463 static int
  464 ate_ifmedia_upd(struct ifnet *ifp)
  465 {
  466         struct ate_softc *sc = ifp->if_softc;
  467         struct mii_data *mii;
  468 
  469         mii = device_get_softc(sc->miibus);
  470         ATE_LOCK(sc);
  471         mii_mediachg(mii);
  472         ATE_UNLOCK(sc);
  473         return (0);
  474 }
  475 
  476 /*
  477  * Notify the world which media we're using.
  478  */
  479 static void
  480 ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
  481 {
  482         struct ate_softc *sc = ifp->if_softc;
  483         struct mii_data *mii;
  484 
  485         mii = device_get_softc(sc->miibus);
  486         ATE_LOCK(sc);
  487         mii_pollstat(mii);
  488         ifmr->ifm_active = mii->mii_media_active;
  489         ifmr->ifm_status = mii->mii_media_status;
  490         ATE_UNLOCK(sc);
  491 }
  492 
  493 static void
  494 ate_stat_update(struct ate_softc *sc, int active)
  495 {
  496         /*
  497          * The speed and full/half-duplex state needs to be reflected
  498          * in the ETH_CFG register.
  499          */
  500         if (IFM_SUBTYPE(active) == IFM_10_T)
  501                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_SPD);
  502         else
  503                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_SPD);
  504         if (active & IFM_FDX)
  505                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_FD);
  506         else
  507                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_FD);
  508 }
  509 
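       /*
        * Once-a-second callout: watch the MII for media changes and fold
        * the hardware statistics registers into the MIB and interface
        * counters, then reschedule ourselves.
        */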
  510 static void
  511 ate_tick(void *xsc)
  512 {
  513         struct ate_softc *sc = xsc;
  514         struct ifnet *ifp = sc->ifp;
  515         struct mii_data *mii;
  516         int active;
  517         uint32_t c;
  518 
   519         /*
   520          * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and, if that
   521          * bit is clear, asks the MII whether there is a link.  It isn't
   522          * clear whether we should do the same thing here.
   523          */
  524         ATE_ASSERT_LOCKED(sc);
  525         if (sc->miibus != NULL) {
  526                 mii = device_get_softc(sc->miibus);
  527                 active = mii->mii_media_active;
  528                 mii_tick(mii);
  529                 if (mii->mii_media_status & IFM_ACTIVE &&
  530                      active != mii->mii_media_active)
  531                         ate_stat_update(sc, mii->mii_media_active);
  532         }
  533 
   534         /*
   535          * Update the stats as best we can.  When we're done, clear
   536          * the status counters and start over.  We're supposed to read these
   537          * registers often enough that they won't overflow.  Hopefully
   538          * once a second is often enough.  Some don't map well to
   539          * the dot3Stats MIB, so for those we just count them as general
   540          * errors.  Stats for iframes, ibytes, oframes and obytes are
   541          * collected elsewhere.  These registers zero on read to prevent
   542          * races.  For all the collision stats, also update the collision
   543          * stats for the interface.
   544          */
  545         sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
  546         sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
  547         c = RD4(sc, ETH_SCOL);
  548         ifp->if_collisions += c;
  549         sc->mibdata.dot3StatsSingleCollisionFrames += c;
  550         c = RD4(sc, ETH_MCOL);
  551         sc->mibdata.dot3StatsMultipleCollisionFrames += c;
  552         ifp->if_collisions += c;
  553         sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
  554         sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
  555         c = RD4(sc, ETH_LCOL);
  556         sc->mibdata.dot3StatsLateCollisions += c;
  557         ifp->if_collisions += c;
  558         c = RD4(sc, ETH_ECOL);
  559         sc->mibdata.dot3StatsExcessiveCollisions += c;
  560         ifp->if_collisions += c;
  561         sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
  562         sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
  563         sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);
   564         /*
   565          * Not sure where to lump these, so count them against the errors
   566          * for the interface.
   567          */
  568         sc->ifp->if_oerrors += RD4(sc, ETH_TUE);
  569         sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
  570             RD4(sc, ETH_USF);
  571 
  572         /*
  573          * Schedule another timeout one second from now.
  574          */
  575         callout_reset(&sc->tick_ch, hz, ate_tick, sc);
  576 }
  577 
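       /*
        * Program the station address into the SA1L/SA1H registers.
        */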
  578 static void
  579 ate_set_mac(struct ate_softc *sc, u_char *eaddr)
  580 {
  581         WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
  582             (eaddr[1] << 8) | eaddr[0]);
  583         WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
  584 }
  585 
  586 static int
  587 ate_get_mac(struct ate_softc *sc, u_char *eaddr)
  588 {
  589         uint32_t low, high;
  590 
   591         /*
   592          * The boot loader sets up the MAC with an address, if one is set in
   593          * the loader.  The TSC loader will also set the MAC address in a
   594          * similar way.  Grab the MAC address from the SA1[HL] registers.
   595          */
  596         low = RD4(sc, ETH_SA1L);
  597         high =  RD4(sc, ETH_SA1H);
  598         if ((low | (high & 0xffff)) == 0)
  599                 return (ENXIO);
  600         eaddr[0] = low & 0xff;
  601         eaddr[1] = (low >> 8) & 0xff;
  602         eaddr[2] = (low >> 16) & 0xff;
  603         eaddr[3] = (low >> 24) & 0xff;
  604         eaddr[4] = high & 0xff;
  605         eaddr[5] = (high >> 8) & 0xff;
  606         return (0);
  607 }
  608 
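       /*
        * Interrupt handler: pass received frames up the stack, reclaim the
        * ping-pong transmit buffers once the hardware is done with them,
        * and apply the errata #11 workaround when ETH_ISR_RBNA is reported.
        */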
  609 static void
  610 ate_intr(void *xsc)
  611 {
  612         struct ate_softc *sc = xsc;
  613         struct ifnet *ifp = sc->ifp;
  614         int status;
  615         int i;
  616         void *bp;
  617         struct mbuf *mb;
  618         uint32_t rx_stat;
  619 
  620         status = RD4(sc, ETH_ISR);
  621         if (status == 0)
  622                 return;
  623         if (status & ETH_ISR_RCOM) {
  624                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  625                     BUS_DMASYNC_POSTREAD);
  626                 while (sc->rx_descs[sc->rx_buf_ptr].addr & ETH_CPU_OWNER) {
  627                         i = sc->rx_buf_ptr;
  628                         sc->rx_buf_ptr = (i + 1) % ATE_MAX_RX_BUFFERS;
  629                         bp = sc->rx_buf[i];
  630                         rx_stat = sc->rx_descs[i].status;
  631                         if ((rx_stat & ETH_LEN_MASK) == 0) {
  632                                 printf("ignoring bogus 0 len packet\n");
  633                                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  634                                     BUS_DMASYNC_PREWRITE);
  635                                 sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
  636                                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  637                                     BUS_DMASYNC_POSTWRITE);
  638                                 continue;
  639                         }
  640                         /* Flush memory for mbuf so we don't get stale bytes */
  641                         bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
  642                             BUS_DMASYNC_POSTREAD);
  643                         WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));
  644 
   645                         /*
   646                          * The length returned by the device includes the
   647                          * Ethernet CRC of the packet, but ifnet drivers
   648                          * are supposed to strip it off.
   649                          */
  650                         mb = m_devget(sc->rx_buf[i],
  651                             (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN,
  652                             ETHER_ALIGN, ifp, NULL);
  653                         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  654                             BUS_DMASYNC_PREWRITE);
  655                         sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
  656                         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  657                             BUS_DMASYNC_POSTWRITE);
  658                         bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
  659                             BUS_DMASYNC_PREREAD);
  660                         if (mb != NULL) {
  661                                 ifp->if_ipackets++;
  662                                 (*ifp->if_input)(ifp, mb);
  663                         }
  664                         
  665                 }
  666         }
  667         if (status & ETH_ISR_TCOM) {
  668                 ATE_LOCK(sc);
  669                 /* XXX TSR register should be cleared */
  670                 if (sc->sent_mbuf[0]) {
  671                         bus_dmamap_sync(sc->mtag, sc->tx_map[0],
  672                             BUS_DMASYNC_POSTWRITE);
  673                         m_freem(sc->sent_mbuf[0]);
  674                         ifp->if_opackets++;
  675                         sc->sent_mbuf[0] = NULL;
  676                 }
  677                 if (sc->sent_mbuf[1]) {
  678                         if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
  679                                 bus_dmamap_sync(sc->mtag, sc->tx_map[1],
  680                                     BUS_DMASYNC_POSTWRITE);
  681                                 m_freem(sc->sent_mbuf[1]);
  682                                 ifp->if_opackets++;
  683                                 sc->txcur = 0;
  684                                 sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
  685                         } else {
  686                                 sc->sent_mbuf[0] = sc->sent_mbuf[1];
  687                                 sc->sent_mbuf[1] = NULL;
  688                                 sc->txcur = 1;
  689                         }
  690                 } else {
  691                         sc->sent_mbuf[0] = NULL;
  692                         sc->txcur = 0;
  693                 }
  694                 /*
  695                  * We're no longer busy, so clear the busy flag and call the
  696                  * start routine to xmit more packets.
  697                  */
  698                 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  699                 atestart_locked(sc->ifp);
  700                 ATE_UNLOCK(sc);
  701         }
  702         if (status & ETH_ISR_RBNA) {
  703                 printf("RBNA workaround\n");
  704                 /* Workaround Errata #11 */
  705                 WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) &~ ETH_CTL_RE);
  706                 WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_RE);
  707         }
  708 }
  709 
  710 /*
  711  * Reset and initialize the chip
  712  */
  713 static void
  714 ateinit_locked(void *xsc)
  715 {
  716         struct ate_softc *sc = xsc;
  717         struct ifnet *ifp = sc->ifp;
  718         struct mii_data *mii;
  719 
  720         ATE_ASSERT_LOCKED(sc);
  721 
  722         /*
  723          * XXX TODO(3)
  724          * we need to turn on the EMAC clock in the pmc.  With the
  725          * default boot loader, this is already turned on.  However, we
  726          * need to think about how best to turn it on/off as the interface
  727          * is brought up/down, as well as dealing with the mii bus...
  728          *
  729          * We also need to multiplex the pins correctly.
  730          */
  731 
   732         /*
   733          * There are two different ways that the MII bus can be connected
   734          * to this chip.  Select the right one based on how the board is
   735          * wired up, as recorded in sc->use_rmii at attach time.
   736          */
  737         if (sc->use_rmii)
  738                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_RMII);
  739         else
  740                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_RMII);
  741 
  742         /*
  743          * Turn on the multicast hash, and write 0's to it.
  744          */
  745         WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_MTI);
  746         WR4(sc, ETH_HSH, 0);
  747         WR4(sc, ETH_HSL, 0);
  748 
  749         WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
  750         WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);
  751 
   752         /*
   753          * The boot loader fills in the MAC address.  If it didn't, we would
   754          * have to set SA1L and SA1H here to an appropriate value.  Note:
   755          * the byte order is big endian, not little endian, so we would have
   756          * some swapping to do.  We don't think we need this.
   757          */
  758         ate_setmcast(sc);
  759 
  760         /* enable big packets */
  761         WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
  762 
   763         /*
   764          * Set the 'running' flag, clear the output-active flag,
   765          * and attempt to start output.
   766          */
  767         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  768         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  769 
  770         mii = device_get_softc(sc->miibus);
  771         mii_pollstat(mii);
  772         ate_stat_update(sc, mii->mii_media_active);
  773         atestart_locked(ifp);
  774 
  775         callout_reset(&sc->tick_ch, hz, ate_tick, sc);
  776 }
  777 
   778 /*
   779  * Dequeue packets and transmit them.
   780  */
  781 static void
  782 atestart_locked(struct ifnet *ifp)
  783 {
  784         struct ate_softc *sc = ifp->if_softc;
  785         struct mbuf *m, *mdefrag;
  786         bus_dma_segment_t segs[1];
  787         int nseg, e;
  788 
  789         ATE_ASSERT_LOCKED(sc);
  790         if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
  791                 return;
  792 
  793         while (sc->txcur < ATE_MAX_TX_BUFFERS) {
  794                 /*
  795                  * check to see if there's room to put another packet into the
  796                  * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
  797                  * packets.  We use OACTIVE to indicate "we can stuff more into
  798                  * our buffers (clear) or not (set)."
  799                  */
  800                 if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
  801                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  802                         return;
  803                 }
  804                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
  805                 if (m == 0) {
  806                         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  807                         return;
  808                 }
  809                 e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
  810                     segs, &nseg, 0);
  811                 if (e == EFBIG) {
  812                         mdefrag = m_defrag(m, M_DONTWAIT);
  813                         if (mdefrag == NULL) {
  814                                 IFQ_DRV_PREPEND(&ifp->if_snd, m);
  815                                 return;
  816                         }
  817                         m = mdefrag;
  818                         e = bus_dmamap_load_mbuf_sg(sc->mtag,
  819                             sc->tx_map[sc->txcur], m, segs, &nseg, 0);
  820                 }
  821                 if (e != 0) {
  822                         m_freem(m);
  823                         continue;
  824                 }
  825                 bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
  826                     BUS_DMASYNC_PREWRITE);
  827 
  828                 /*
  829                  * tell the hardware to xmit the packet.
  830                  */
  831                 WR4(sc, ETH_TAR, segs[0].ds_addr);
  832                 WR4(sc, ETH_TCR, segs[0].ds_len);
  833         
  834                 /*
  835                  * Tap off here if there is a bpf listener.
  836                  */
  837                 BPF_MTAP(ifp, m);
  838 
  839                 sc->sent_mbuf[sc->txcur] = m;
  840                 sc->txcur++;
  841         }
  842 }
  843 
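       /*
        * Unlocked wrappers: ateinit() and atestart() just take the softc
        * mutex around the _locked variants above.
        */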
  844 static void
  845 ateinit(void *xsc)
  846 {
  847         struct ate_softc *sc = xsc;
  848         ATE_LOCK(sc);
  849         ateinit_locked(sc);
  850         ATE_UNLOCK(sc);
  851 }
  852 
  853 static void
  854 atestart(struct ifnet *ifp)
  855 {
  856         struct ate_softc *sc = ifp->if_softc;
  857         ATE_LOCK(sc);
  858         atestart_locked(ifp);
  859         ATE_UNLOCK(sc);
  860 }
  861 
   862 /*
   863  * Turn off interrupts and stop the NIC.  Can be called with sc->ifp
   864  * NULL, so be careful.
   865  */
  866 static void
  867 atestop(struct ate_softc *sc)
  868 {
  869         struct ifnet *ifp = sc->ifp;
  870 
  871         if (ifp) {
  872                 ifp->if_timer = 0;
  873                 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  874         }
  875 
  876         callout_stop(&sc->tick_ch);
  877 
   878         /*
   879          * Enable some parts of the MAC that are needed always (like the
   880          * MII bus).  This turns off the RE and TE bits, which will remain
   881          * off until ateinit() is called to turn them on.  With RE and TE
   882          * turned off, there's no DMA to worry about after this write.
   883          */
  884         WR4(sc, ETH_CTL, ETH_CTL_MPE);
  885 
  886         /*
  887          * Turn off all the configured options and revert to defaults.
  888          */
  889         WR4(sc, ETH_CFG, ETH_CFG_CLK_32);
  890 
  891         /*
  892          * Turn off all the interrupts, and ack any pending ones by reading
  893          * the ISR.
  894          */
  895         WR4(sc, ETH_IDR, 0xffffffff);
  896         RD4(sc, ETH_ISR);
  897 
  898         /*
  899          * Clear out the Transmit and Receiver Status registers of any
  900          * errors they may be reporting
  901          */
  902         WR4(sc, ETH_TSR, 0xffffffff);
  903         WR4(sc, ETH_RSR, 0xffffffff);
  904 
   905         /*
   906          * XXX TODO(8)
   907          * Do we need to worry about the busdma resources?  Yes, I think we
   908          * need to sync and unload them.  We may also need to release the
   909          * mbufs that are associated with RX and TX operations.
   910          */
  911 
  912         /*
  913          * XXX we should power down the EMAC if it isn't in use, after
  914          * putting it into loopback mode.  This saves about 400uA according
  915          * to the datasheet.
  916          */
  917 }
  918 
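       /*
        * Handle ioctls for interface flags, the multicast filter, media
        * selection and the VLAN MTU capability; everything else is passed
        * to ether_ioctl().
        */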
  919 static int
  920 ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  921 {
  922         struct ate_softc *sc = ifp->if_softc;
  923         struct mii_data *mii;
  924         struct ifreq *ifr = (struct ifreq *)data;       
  925         int mask, error = 0;
  926 
  927         switch (cmd) {
  928         case SIOCSIFFLAGS:
  929                 ATE_LOCK(sc);
  930                 if ((ifp->if_flags & IFF_UP) == 0 &&
  931                     ifp->if_drv_flags & IFF_DRV_RUNNING) {
  932                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  933                         atestop(sc);
  934                 } else {
  935                         /* reinitialize card on any parameter change */
  936                         ateinit_locked(sc);
  937                 }
  938                 ATE_UNLOCK(sc);
  939                 break;
  940 
  941         case SIOCADDMULTI:
  942         case SIOCDELMULTI:
  943                 /* update multicast filter list. */
  944                 ATE_LOCK(sc);
  945                 ate_setmcast(sc);
  946                 ATE_UNLOCK(sc);
  947                 error = 0;
  948                 break;
  949 
  950         case SIOCSIFMEDIA:
  951         case SIOCGIFMEDIA:
  952                 mii = device_get_softc(sc->miibus);
  953                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
  954                 break;
  955         case SIOCSIFCAP:
  956                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
  957                 if (mask & IFCAP_VLAN_MTU) {
  958                         ATE_LOCK(sc);
  959                         if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
  960                                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
  961                                 ifp->if_capenable |= IFCAP_VLAN_MTU;
  962                         } else {
  963                                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
  964                                 ifp->if_capenable &= ~IFCAP_VLAN_MTU;
  965                         }
  966                         ATE_UNLOCK(sc);
   967                 }
                       break;
  968         default:
  969                 error = ether_ioctl(ifp, cmd, data);
  970                 break;
  971         }
  972         return (error);
  973 }
  974 
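       /*
        * Bus callback: one of our children has been detached; if it is the
        * miibus, forget the cached pointer to it.
        */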
  975 static void
  976 ate_child_detached(device_t dev, device_t child)
  977 {
  978         struct ate_softc *sc;
  979 
  980         sc = device_get_softc(dev);
  981         if (child == sc->miibus)
  982                 sc->miibus = NULL;
  983 }
  984 
  985 /*
  986  * MII bus support routines.
  987  */
  988 static int
  989 ate_miibus_readreg(device_t dev, int phy, int reg)
  990 {
  991         struct ate_softc *sc;
  992         int val;
  993 
  994         /*
   995          * XXX if we implement aggressive power savings, then we need
  996          * XXX to make sure that the clock to the emac is on here
  997          */
  998 
  999         sc = device_get_softc(dev);
  1000         DELAY(1);       /* Hangs without this delay (which is really about 30.5us at the moment). */
 1001         WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
 1002         while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
 1003                 continue;
 1004         val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;
 1005 
 1006         return (val);
 1007 }
 1008 
 1009 static void
 1010 ate_miibus_writereg(device_t dev, int phy, int reg, int data)
 1011 {
 1012         struct ate_softc *sc;
 1013         
 1014         /*
  1015          * XXX if we implement aggressive power savings, then we need
 1016          * XXX to make sure that the clock to the emac is on here
 1017          */
 1018 
 1019         sc = device_get_softc(dev);
 1020         WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
 1021         while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
 1022                 continue;
 1023         return;
 1024 }
 1025 
 1026 static device_method_t ate_methods[] = {
 1027         /* Device interface */
 1028         DEVMETHOD(device_probe,         ate_probe),
 1029         DEVMETHOD(device_attach,        ate_attach),
 1030         DEVMETHOD(device_detach,        ate_detach),
 1031 
 1032         /* Bus interface */
 1033         DEVMETHOD(bus_child_detached,   ate_child_detached),
 1034 
 1035         /* MII interface */
 1036         DEVMETHOD(miibus_readreg,       ate_miibus_readreg),
 1037         DEVMETHOD(miibus_writereg,      ate_miibus_writereg),
 1038 
 1039         { 0, 0 }
 1040 };
 1041 
 1042 static driver_t ate_driver = {
 1043         "ate",
 1044         ate_methods,
 1045         sizeof(struct ate_softc),
 1046 };
 1047 
 1048 DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0);
 1049 DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0);
 1050 MODULE_DEPEND(ate, miibus, 1, 1, 1);
 1051 MODULE_DEPEND(ate, ether, 1, 1, 1);
