FreeBSD/Linux Kernel Cross Reference
sys/arm/at91/if_ate.c

    1 /*-
    2  * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  *
   13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   18  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   19  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   20  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   22  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   23  */
   24 
   25 /* TODO: (in no order)
   26  *
   27  * 8) Need to sync busdma goo in atestop
   28  * 9) atestop should maybe free the mbufs?
   29  *
   30  * 1) detach
   31  * 2) Free dma setup
   32  * 3) Turn on the clock in pmc and turn on pins?  Turn off?
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/6.2/sys/arm/at91/if_ate.c 160482 2006-07-18 21:57:35Z imp $");
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/bus.h>
   41 #include <sys/kernel.h>
   42 #include <sys/mbuf.h>
   43 #include <sys/malloc.h>
   44 #include <sys/module.h>
   45 #include <sys/rman.h>
   46 #include <sys/socket.h>
   47 #include <sys/sockio.h>
   48 #include <machine/bus.h>
   49 
   50 #include <net/ethernet.h>
   51 #include <net/if.h>
   52 #include <net/if_arp.h>
   53 #include <net/if_dl.h>
   54 #include <net/if_media.h>
   55 #include <net/if_mib.h>
   56 #include <net/if_types.h>
   57 
   58 #ifdef INET
   59 #include <netinet/in.h>
   60 #include <netinet/in_systm.h>
   61 #include <netinet/in_var.h>
   62 #include <netinet/ip.h>
   63 #endif
   64 
   65 #include <net/bpf.h>
   66 #include <net/bpfdesc.h>
   67 
   68 #include <dev/mii/mii.h>
   69 #include <dev/mii/miivar.h>
   70 #include <arm/at91/if_atereg.h>
   71 
   72 #include "miibus_if.h"
   73 
   74 #define ATE_MAX_TX_BUFFERS 64           /* We have ping-pong tx buffers */
   75 #define ATE_MAX_RX_BUFFERS 64
   76 
   77 struct ate_softc
   78 {
   79         struct ifnet *ifp;              /* ifnet pointer */
   80         struct mtx sc_mtx;              /* basically a perimeter lock */
   81         device_t dev;                   /* Myself */
   82         device_t miibus;                /* My child miibus */
   83         void *intrhand;                 /* Interrupt handle */
   84         struct resource *irq_res;       /* IRQ resource */
   85         struct resource *mem_res;       /* Memory resource */
   86         struct callout tick_ch;         /* Tick callout */
   87         bus_dma_tag_t mtag;             /* bus dma tag for mbufs */
   88         bus_dmamap_t tx_map[ATE_MAX_TX_BUFFERS];
   89         struct mbuf *sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
   90         bus_dma_tag_t rxtag;
   91         bus_dmamap_t rx_map[ATE_MAX_RX_BUFFERS];
   92         void *rx_buf[ATE_MAX_RX_BUFFERS]; /* RX buffer space */
   93         int rx_buf_ptr;
   94         bus_dma_tag_t rx_desc_tag;
   95         bus_dmamap_t rx_desc_map;
   96         int txcur;                      /* current tx map pointer */
   97         bus_addr_t rx_desc_phys;
   98         eth_rx_desc_t *rx_descs;
   99         int use_rmii;
  100         struct  ifmib_iso_8802_3 mibdata; /* stuff for network mgmt */
  101 };
  102 
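       /*
        * Register access helpers: all EMAC registers are read and written as
        * 32-bit values through the memory resource mapped in ate_activate().
        */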
  103 static inline uint32_t
  104 RD4(struct ate_softc *sc, bus_size_t off)
  105 {
  106         return bus_read_4(sc->mem_res, off);
  107 }
  108 
  109 static inline void
  110 WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
  111 {
  112         bus_write_4(sc->mem_res, off, val);
  113 }
  114 
  115 #define ATE_LOCK(_sc)           mtx_lock(&(_sc)->sc_mtx)
  116 #define ATE_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_mtx)
  117 #define ATE_LOCK_INIT(_sc) \
  118         mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
  119             MTX_NETWORK_LOCK, MTX_DEF)
  120 #define ATE_LOCK_DESTROY(_sc)   mtx_destroy(&_sc->sc_mtx);
  121 #define ATE_ASSERT_LOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_OWNED);
  122 #define ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
  123 
  124 static devclass_t ate_devclass;
  125 
  126 /* ifnet entry points */
  127 
  128 static void ateinit_locked(void *);
  129 static void atestart_locked(struct ifnet *);
  130 
  131 static void ateinit(void *);
  132 static void atestart(struct ifnet *);
  133 static void atestop(struct ate_softc *);
  134 static void atewatchdog(struct ifnet *);
  135 static int ateioctl(struct ifnet * ifp, u_long, caddr_t);
  136 
  137 /* bus entry points */
  138 
  139 static int ate_probe(device_t dev);
  140 static int ate_attach(device_t dev);
  141 static int ate_detach(device_t dev);
  142 static void ate_intr(void *);
  143 
  144 /* helper routines */
  145 static int ate_activate(device_t dev);
  146 static void ate_deactivate(device_t dev);
  147 static int ate_ifmedia_upd(struct ifnet *ifp);
  148 static void ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
  149 static void ate_get_mac(struct ate_softc *sc, u_char *eaddr);
  150 static void ate_set_mac(struct ate_softc *sc, u_char *eaddr);
  151 
  152 /*
   153  * The AT91 family of products includes an Ethernet controller called the
   154  * EMAC.  However, it isn't self-identifying.  The parent bus code is
   155  * expected to add ate devices only where they really exist, so we do
   156  * nothing here to identify the device and just set its name.
  157  */
  158 static int
  159 ate_probe(device_t dev)
  160 {
  161         device_set_desc(dev, "EMAC");
  162         return (0);
  163 }
  164 
  165 static int
  166 ate_attach(device_t dev)
  167 {
  168         struct ate_softc *sc = device_get_softc(dev);
  169         struct ifnet *ifp = NULL;
  170         int err;
  171         u_char eaddr[6];
  172 
  173         sc->dev = dev;
  174         err = ate_activate(dev);
  175         if (err)
  176                 goto out;
  177 
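               /*
                * Remember whether the boot loader set up an RMII connection so
                * that ateinit_locked() can preserve that choice when it
                * reprograms ETH_CFG.
                */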
  178         sc->use_rmii = (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;
  179 
  180         /* calling atestop before ifp is set is OK */
  181         atestop(sc);
  182         ATE_LOCK_INIT(sc);
  183         callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
  184 
  185         ate_get_mac(sc, eaddr);
  186         ate_set_mac(sc, eaddr);
  187 
  188 
  189         sc->ifp = ifp = if_alloc(IFT_ETHER);
  190         if (mii_phy_probe(dev, &sc->miibus, ate_ifmedia_upd, ate_ifmedia_sts)) {
  191                 device_printf(dev, "Cannot find my PHY.\n");
  192                 err = ENXIO;
  193                 goto out;
  194         }
  195 
  196         ifp->if_softc = sc;
  197         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  198         ifp->if_mtu = ETHERMTU;
  199         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  200         ifp->if_start = atestart;
  201         ifp->if_ioctl = ateioctl;
  202         ifp->if_watchdog = atewatchdog;
  203         ifp->if_init = ateinit;
  204         ifp->if_baudrate = 10000000;
  205         IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
  206         ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
  207         IFQ_SET_READY(&ifp->if_snd);
  208         ifp->if_timer = 0;
  209         ifp->if_linkmib = &sc->mibdata;
  210         ifp->if_linkmiblen = sizeof(sc->mibdata);
  211         sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
  212 
  213         ether_ifattach(ifp, eaddr);
  214 
  215         /*
  216          * Activate the interrupt
  217          */
  218         err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
  219             ate_intr, sc, &sc->intrhand);
  220         if (err) {
  221                 ether_ifdetach(ifp);
  222                 ATE_LOCK_DESTROY(sc);
  223         }
  224 out:;
  225         if (err)
  226                 ate_deactivate(dev);
  227         if (err && ifp)
  228                 if_free(ifp);
  229         return (err);
  230 }
  231 
  232 static int
  233 ate_detach(device_t dev)
  234 {
  235         return EBUSY;   /* XXX TODO(1) */
  236 }
  237 
  238 static void
  239 ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  240 {
  241         struct ate_softc *sc;
  242 
  243         if (error != 0)
  244                 return;
  245         sc = (struct ate_softc *)arg;
  246         sc->rx_desc_phys = segs[0].ds_addr;
  247 }
  248 
  249 static void
  250 ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  251 {
  252         struct ate_softc *sc;
  253         int i;
  254 
  255         if (error != 0)
  256                 return;
  257         sc = (struct ate_softc *)arg;
  258         i = sc->rx_buf_ptr;
  259 
  260         /*
  261          * For the last buffer, set the wrap bit so the controller
  262          * restarts from the first descriptor.
  263          */
  264         if (i == ATE_MAX_RX_BUFFERS - 1)
  265                 sc->rx_descs[i].addr = segs[0].ds_addr | ETH_WRAP_BIT;
  266         else
  267                 sc->rx_descs[i].addr = segs[0].ds_addr;
  268         sc->rx_descs[i].status = 0;
  269         /* Flush the memory in the mbuf */
  270         bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD);
  271 }
  272 
  273 /*
  274  * Compute the multicast filter for this device using the standard
   275  * algorithm.  I wonder why this isn't in ether somewhere, as a lot of
   276  * different MAC chips use this method (or its bit-reversed
   277  * variant).
  278  */
  279 static void
  280 ate_setmcast(struct ate_softc *sc)
  281 {
  282         uint32_t index;
  283         uint32_t mcaf[2];
  284         u_char *af = (u_char *) mcaf;
  285         struct ifmultiaddr *ifma;
  286 
  287         mcaf[0] = 0;
  288         mcaf[1] = 0;
  289 
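               /*
                * Hash each link-level multicast address with a big-endian CRC32
                * and use the top 6 bits as the bit index into the 64-bit hash
                * filter being built in mcaf[].
                */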
  290         IF_ADDR_LOCK(sc->ifp);
  291         TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
  292                 if (ifma->ifma_addr->sa_family != AF_LINK)
  293                         continue;
  294                 index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  295                     ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
  296                 af[index >> 3] |= 1 << (index & 7);
  297         }
  298         IF_ADDR_UNLOCK(sc->ifp);
  299 
  300         /*
   301          * Write the hash to the hash registers.  The controller can also
   302          * accept unicast packets via these registers, which makes bridging
   303          * easier, but we don't take advantage of that.  The locking here is
   304          * arranged to avoid a LOR with IF_ADDR_LOCK, but might not be
   305          * strictly necessary.
  306          */
  307         WR4(sc, ETH_HSL, mcaf[0]);
  308         WR4(sc, ETH_HSH, mcaf[1]);
  309 }
  310 
  311 static int
  312 ate_activate(device_t dev)
  313 {
  314         struct ate_softc *sc;
  315         int rid, err, i;
  316 
  317         sc = device_get_softc(dev);
  318         rid = 0;
  319         sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
  320             RF_ACTIVE);
  321         if (sc->mem_res == NULL)
  322                 goto errout;
  323         rid = 0;
  324         sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
  325             RF_ACTIVE);
   326         if (sc->irq_res == NULL)
  327                 goto errout;
  328 
  329         /*
  330          * Allocate DMA tags and maps
  331          */
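               /*
                * TX mbufs are defragmented into a single cluster before being
                * loaded (see atestart_locked()), so a one-segment tag of
                * MCLBYTES is sufficient.
                */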
  332         err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  333             BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
  334             busdma_lock_mutex, &sc->sc_mtx, &sc->mtag);
  335         if (err != 0)
  336                 goto errout;
  337         for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
  338                 err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]);
  339                 if (err != 0)
  340                         goto errout;
  341         }
   342         /*
   343          * Allocate our RX buffers.  This chip has an RX descriptor ring
   344          * that we fill in with the bus addresses of the buffers; the EMAC
   345          * hands received frames to us through it.
   346          *
   347          * First, allocate the DMA tag and maps for the RX buffers
   348          * themselves.
   349          */
  350         err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  351             BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
  352             busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag);
  353         if (err != 0)
  354                 goto errout;
  355         for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
  356                 err = bus_dmamap_create(sc->rxtag, 0, &sc->rx_map[i]);
  357                 if (err != 0)
  358                         goto errout;
  359         }
  360 
   361         /* DMA tag and map for the RX descriptors. */
  362         err = bus_dma_tag_create(NULL, sizeof(eth_rx_desc_t), 0, 
  363             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  364             ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1,
  365             ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
  366             &sc->sc_mtx, &sc->rx_desc_tag);
  367         if (err != 0)
  368                 goto errout;
  369         if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
  370             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
  371                 goto errout;
  372         if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
  373             sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t),
  374             ate_getaddr, sc, 0) != 0)
  375                 goto errout;
  376         /* XXX TODO(5) Put this in ateinit_locked? */
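               /*
                * rx_buf_ptr tells ate_load_rx_buf() which descriptor slot to
                * fill in when the busdma load callback runs for each buffer
                * below.
                */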
  377         for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
  378                 sc->rx_buf_ptr = i;
  379                 if (bus_dmamem_alloc(sc->rxtag, (void **)&sc->rx_buf[i],
  380                       BUS_DMA_NOWAIT, &sc->rx_map[i]) != 0)
  381                         goto errout;
  382                 if (bus_dmamap_load(sc->rxtag, sc->rx_map[i], sc->rx_buf[i],
  383                     MCLBYTES, ate_load_rx_buf, sc, 0) != 0)
  384                         goto errout;
  385         }
  386         sc->rx_buf_ptr = 0;
  387         /* Flush the memory for the EMAC rx descriptor */
  388         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
  389         /* Write the descriptor queue address. */
  390         WR4(sc, ETH_RBQP, sc->rx_desc_phys);
  391         return (0);
  392 errout:
  393         ate_deactivate(dev);
  394         return (ENOMEM);
  395 }
  396 
  397 static void
  398 ate_deactivate(device_t dev)
  399 {
  400         struct ate_softc *sc;
  401 
  402         sc = device_get_softc(dev);
  403         /* XXX TODO(2) teardown busdma junk, below from fxp -- customize */
  404 #if 0
  405         if (sc->fxp_mtag) {
  406                 for (i = 0; i < FXP_NRFABUFS; i++) {
  407                         rxp = &sc->fxp_desc.rx_list[i];
  408                         if (rxp->rx_mbuf != NULL) {
  409                                 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
  410                                     BUS_DMASYNC_POSTREAD);
  411                                 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
  412                                 m_freem(rxp->rx_mbuf);
  413                         }
  414                         bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
  415                 }
  416                 bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
  417                 for (i = 0; i < FXP_NTXCB; i++) {
  418                         txp = &sc->fxp_desc.tx_list[i];
  419                         if (txp->tx_mbuf != NULL) {
  420                                 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
  421                                     BUS_DMASYNC_POSTWRITE);
  422                                 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
  423                                 m_freem(txp->tx_mbuf);
  424                         }
  425                         bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
  426                 }
  427                 bus_dma_tag_destroy(sc->fxp_mtag);
  428         }
  429         if (sc->fxp_stag)
  430                 bus_dma_tag_destroy(sc->fxp_stag);
  431         if (sc->cbl_tag)
  432                 bus_dma_tag_destroy(sc->cbl_tag);
  433         if (sc->mcs_tag)
  434                 bus_dma_tag_destroy(sc->mcs_tag);
  435 #endif
  436         if (sc->intrhand)
  437                 bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
  438         sc->intrhand = 0;
  439         bus_generic_detach(sc->dev);
  440         if (sc->miibus)
  441                 device_delete_child(sc->dev, sc->miibus);
  442         if (sc->mem_res)
   443                 bus_release_resource(dev, SYS_RES_MEMORY,
  444                     rman_get_rid(sc->mem_res), sc->mem_res);
  445         sc->mem_res = 0;
  446         if (sc->irq_res)
  447                 bus_release_resource(dev, SYS_RES_IRQ,
  448                     rman_get_rid(sc->irq_res), sc->irq_res);
  449         sc->irq_res = 0;
  450         return;
  451 }
  452 
  453 /*
  454  * Change media according to request.
  455  */
  456 static int
  457 ate_ifmedia_upd(struct ifnet *ifp)
  458 {
  459         struct ate_softc *sc = ifp->if_softc;
  460         struct mii_data *mii;
  461 
  462         mii = device_get_softc(sc->miibus);
  463         ATE_LOCK(sc);
  464         mii_mediachg(mii);
  465         ATE_UNLOCK(sc);
  466         return (0);
  467 }
  468 
  469 /*
  470  * Notify the world which media we're using.
  471  */
  472 static void
  473 ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
  474 {
  475         struct ate_softc *sc = ifp->if_softc;
  476         struct mii_data *mii;
  477 
  478         mii = device_get_softc(sc->miibus);
  479         ATE_LOCK(sc);
  480         mii_pollstat(mii);
  481         ifmr->ifm_active = mii->mii_media_active;
  482         ifmr->ifm_status = mii->mii_media_status;
  483         ATE_UNLOCK(sc);
  484 }
  485 
  486 static void
  487 ate_tick(void *xsc)
  488 {
  489         struct ate_softc *sc = xsc;
  490         struct mii_data *mii;
  491         int active;
  492 
  493         /*
   494          * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and only asks
   495          * the MII whether there's a link when that bit is clear.  Not sure
   496          * if we should do the same thing here or not.
  497          */
  498         ATE_ASSERT_LOCKED(sc);
  499         if (sc->miibus != NULL) {
  500                 mii = device_get_softc(sc->miibus);
  501                 active = mii->mii_media_active;
  502                 mii_tick(mii);
  503                 if (mii->mii_media_status & IFM_ACTIVE &&
  504                      active != mii->mii_media_active) {
  505                         /*
   506                          * The speed and full/half-duplex state need
  507                          * to be reflected in the ETH_CFG register, it
  508                          * seems.
  509                          */
  510                         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T)
  511                                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) &
  512                                     ~ETH_CFG_SPD);
  513                         else
  514                                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) |
  515                                     ETH_CFG_SPD);
  516                         if (mii->mii_media_active & IFM_FDX)
  517                                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) |
  518                                     ETH_CFG_FD);
  519                         else
  520                                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) &
  521                                     ~ETH_CFG_FD);
  522                 }
  523         }
  524 
  525         /*
  526          * Update the stats as best we can.  When we're done, clear
  527          * the status counters and start over.  We're supposed to read these
  528          * registers often enough that they won't overflow.  Hopefully
  529          * once a second is often enough.  Some don't map well to
  530          * the dot3Stats mib, so for those we just count them as general
   531          * errors.  Stats for iframes, ibytes, oframes, and obytes are
  532          * collected elsewhere.  These registers zero on a read to prevent
  533          * races.
  534          */
  535         sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
  536         sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
  537         sc->mibdata.dot3StatsSingleCollisionFrames += RD4(sc, ETH_SCOL);
  538         sc->mibdata.dot3StatsMultipleCollisionFrames += RD4(sc, ETH_MCOL);
  539         sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
  540         sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
  541         sc->mibdata.dot3StatsLateCollisions += RD4(sc, ETH_LCOL);
  542         sc->mibdata.dot3StatsExcessiveCollisions += RD4(sc, ETH_ECOL);
  543         sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
  544         sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
  545         sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);
  546         /*
  547          * not sure where to lump these, so count them against the errors
  548          * for the interface.
  549          */
  550         sc->ifp->if_oerrors += RD4(sc, ETH_CSE) + RD4(sc, ETH_TUE);
  551         sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
  552             RD4(sc, ETH_USF);
  553 
  554         /*
  555          * Schedule another timeout one second from now.
  556          */
  557         callout_reset(&sc->tick_ch, hz, ate_tick, sc);
  558 }
  559 
  560 static void
  561 ate_set_mac(struct ate_softc *sc, u_char *eaddr)
  562 {
  563         WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
  564             (eaddr[1] << 8) | eaddr[0]);
  565         WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
  566 
  567 }
  568 
  569 static void
  570 ate_get_mac(struct ate_softc *sc, u_char *eaddr)
  571 {
   572         uint32_t low, high;
   573 
   574         /*
   575          * The KB920x and TSC boot loaders will set up the MAC with an
   576          * address, if one is set in the loader.  Grab it from the SA1[HL]
   577          * registers, in the same byte order that ate_set_mac() writes them.
   578          */
   579         low = RD4(sc, ETH_SA1L);
   580         high = RD4(sc, ETH_SA1H);
   581         eaddr[0] = low & 0xff;
   582         eaddr[1] = (low >> 8) & 0xff;
   583         eaddr[2] = (low >> 16) & 0xff;
   584         eaddr[3] = (low >> 24) & 0xff;
   585         eaddr[4] = high & 0xff;
   586         eaddr[5] = (high >> 8) & 0xff;
  587 }
  588 
  589 static void
  590 ate_intr(void *xsc)
  591 {
  592         struct ate_softc *sc = xsc;
  593         int status;
  594         int i;
  595         void *bp;
  596         struct mbuf *mb;
  597         uint32_t rx_stat;
  598 
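               /*
                * The ISR is clear-on-read, so this both fetches and
                * acknowledges all pending interrupt causes.
                */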
  599         status = RD4(sc, ETH_ISR);
  600         if (status == 0)
  601                 return;
  602         if (status & ETH_ISR_RCOM) {
  603                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  604                     BUS_DMASYNC_POSTREAD);
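                       /*
                        * The EMAC sets the ownership bit in a descriptor's
                        * address word when it has stored a received frame
                        * there; we clear it below to hand the descriptor back
                        * to the hardware.
                        */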
  605                 while (sc->rx_descs[sc->rx_buf_ptr].addr & ETH_CPU_OWNER) {
  606                         i = sc->rx_buf_ptr;
  607                         sc->rx_buf_ptr = (i + 1) % ATE_MAX_RX_BUFFERS;
  608                         bp = sc->rx_buf[i];
  609                         rx_stat = sc->rx_descs[i].status;
  610                         if ((rx_stat & ETH_LEN_MASK) == 0) {
  611                                 printf("ignoring bogus 0 len packet\n");
  612                                 sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
  613                                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  614                                     BUS_DMASYNC_PREWRITE);
  615                                 continue;
  616                         }
  617                         /* Flush memory for mbuf so we don't get stale bytes */
  618                         bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
  619                             BUS_DMASYNC_POSTREAD);
   620                         WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));     /* XXX WHY? XXX imp */
  621                         /*
  622                          * The length returned by the device includes the
  623                          * ethernet CRC calculation for the packet, but
  624                          * ifnet drivers are supposed to discard it.
  625                          */
  626                         mb = m_devget(sc->rx_buf[i],
  627                             (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN,
  628                             ETHER_ALIGN, sc->ifp, NULL);
  629                         sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
  630                         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  631                             BUS_DMASYNC_PREWRITE);
  632                         bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
  633                             BUS_DMASYNC_PREREAD);
  634                         if (mb != NULL)
  635                                 (*sc->ifp->if_input)(sc->ifp, mb);
  636                 }
  637         }
  638         if (status & ETH_ISR_TCOM) {
  639                 ATE_LOCK(sc);
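                       /*
                        * Transmit complete: the EMAC queues at most two frames,
                        * and sent_mbuf[0] is always the one that was handed to
                        * it first.  Free it, then either retire slot 1 as well
                        * (transmitter idle) or slide it down to become the new
                        * slot 0.
                        */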
  640                 if (sc->sent_mbuf[0]) {
  641                         m_freem(sc->sent_mbuf[0]);
  642                         sc->sent_mbuf[0] = NULL;
  643                 }
  644                 if (sc->sent_mbuf[1]) {
  645                         if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
  646                                 m_freem(sc->sent_mbuf[1]);
  647                                 sc->txcur = 0;
  648                                 sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
  649                         } else {
  650                                 sc->sent_mbuf[0] = sc->sent_mbuf[1];
  651                                 sc->sent_mbuf[1] = NULL;
  652                                 sc->txcur = 1;
  653                         }
  654                 } else {
  655                         sc->sent_mbuf[0] = NULL;
  656                         sc->txcur = 0;
  657                 }
  658                 /*
  659                  * We're no longer busy, so clear the busy flag and call the
  660                  * start routine to xmit more packets.
  661                  */
  662                 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  663                 atestart_locked(sc->ifp);
  664                 ATE_UNLOCK(sc);
  665         }
  666         if (status & ETH_ISR_RBNA) {
  667                 printf("RBNA workaround\n");
  668                 /* Workaround Errata #11 */
  669                 WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) &~ ETH_CTL_RE);
  670                 WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_RE);
  671         }
  672 }
  673 
  674 /*
  675  * Reset and initialize the chip
  676  */
  677 static void
  678 ateinit_locked(void *xsc)
  679 {
  680         struct ate_softc *sc = xsc;
  681         struct ifnet *ifp = sc->ifp;
  682 
  683         ATE_ASSERT_LOCKED(sc);
  684 
  685         /*
  686          * XXX TODO(3)
  687          * we need to turn on the EMAC clock in the pmc.  With the
  688          * default boot loader, this is already turned on.  However, we
  689          * need to think about how best to turn it on/off as the interface
  690          * is brought up/down, as well as dealing with the mii bus...
  691          *
  692          * We also need to multiplex the pins correctly.
  693          */
  694 
  695         /*
   696          * There are two different ways that the MII bus can be connected
   697          * to this chip (MII or RMII).  Select the one the boot loader left
   698          * configured, as recorded in sc->use_rmii at attach time.
  699          */
  700         if (sc->use_rmii)
  701                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_RMII);
  702         else
  703                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_RMII);
  704 
  705         /*
  706          * Turn on the multicast hash, and write 0's to it.
  707          */
  708         WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_MTI);
  709         WR4(sc, ETH_HSH, 0);
  710         WR4(sc, ETH_HSL, 0);
  711 
  712         WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
  713         WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);
  714 
  715         /*
   716          * The boot loader fills in the MAC address.  If it didn't, we would
   717          * have to set SA1L and SA1H here to the appropriate values.  Note:
   718          * the byte order is big endian, not little endian, so some swapping
   719          * would be needed.  Again, that's only if we need it, which I doubt.
  720          */
  721         ate_setmcast(sc);
  722 
  723         /*
   724          * Set the 'running' flag and clear the output-active flag, then
   725          * attempt to start output.
  726          */
  727         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  728         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  729         atestart_locked(ifp);
  730 
  731         callout_reset(&sc->tick_ch, hz, ate_tick, sc);
  732 }
  733 
  734 /*
   735  * Dequeue packets and transmit them.
  736  */
  737 static void
  738 atestart_locked(struct ifnet *ifp)
  739 {
  740         struct ate_softc *sc = ifp->if_softc;
  741         struct mbuf *m, *mdefrag;
  742         bus_dma_segment_t segs[1];
  743         int nseg;
  744 
  745         ATE_ASSERT_LOCKED(sc);
  746         if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
  747                 return;
  748 
  749         while (sc->txcur < ATE_MAX_TX_BUFFERS) {
  750                 /*
  751                  * check to see if there's room to put another packet into the
  752                  * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
  753                  * packets.  We use OACTIVE to indicate "we can stuff more into
  754                  * our buffers (clear) or not (set)."
  755                  */
  756                 if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
  757                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  758                         return;
  759                 }
  760                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
   761                 if (m == NULL) {
  762                         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  763                         return;
  764                 }
  765                 mdefrag = m_defrag(m, M_DONTWAIT);
  766                 if (mdefrag == NULL) {
  767                         IFQ_DRV_PREPEND(&ifp->if_snd, m);
  768                         return;
  769                 }
  770                 m = mdefrag;
  771                 if (bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
  772                     segs, &nseg, 0) != 0) {
  773                         m_freem(m);
  774                         continue;
  775                 }
  776                 bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
  777                     BUS_DMASYNC_PREWRITE);
  778 
  779                 /*
  780                  * tell the hardware to xmit the packet.
  781                  */
  782                 WR4(sc, ETH_TAR, segs[0].ds_addr);
  783                 WR4(sc, ETH_TCR, segs[0].ds_len);
  784         
  785                 /*
  786                  * Tap off here if there is a bpf listener.
  787                  */
  788                 BPF_MTAP(ifp, m);
  789 
  790                 sc->sent_mbuf[sc->txcur] = m;
  791                 sc->txcur++;
  792         }
  793 }
  794 
  795 static void
  796 ateinit(void *xsc)
  797 {
  798         struct ate_softc *sc = xsc;
  799         ATE_LOCK(sc);
  800         ateinit_locked(sc);
  801         ATE_UNLOCK(sc);
  802 }
  803 
  804 static void
  805 atestart(struct ifnet *ifp)
  806 {
  807         struct ate_softc *sc = ifp->if_softc;
  808         ATE_LOCK(sc);
  809         atestart_locked(ifp);
  810         ATE_UNLOCK(sc);
  811 }
  812 
  813 /*
  814  * Turn off interrupts, and stop the nic.  Can be called with sc->ifp NULL
  815  * so be careful.
  816  */
  817 static void
  818 atestop(struct ate_softc *sc)
  819 {
  820         struct ifnet *ifp = sc->ifp;
  821 
  822         if (ifp) {
  823                 ifp->if_timer = 0;
  824                 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  825         }
  826 
  827         callout_stop(&sc->tick_ch);
  828 
  829         /*
   830          * Enable only the parts of the MAC that are always needed (like
   831          * the MII bus).  This turns off the RE and TE bits, which remain
   832          * off until ateinit() is called to turn them back on.  With RE and TE
  833          * turned off, there's no DMA to worry about after this write.
  834          */
  835         WR4(sc, ETH_CTL, ETH_CTL_MPE);
  836 
  837         /*
  838          * Turn off all the configured options and revert to defaults.
  839          */
  840         WR4(sc, ETH_CFG, ETH_CFG_CLK_32);
  841 
  842         /*
  843          * Turn off all the interrupts, and ack any pending ones by reading
  844          * the ISR.
  845          */
  846         WR4(sc, ETH_IDR, 0xffffffff);
  847         RD4(sc, ETH_ISR);
  848 
  849         /*
   850          * Clear out the Transmit and Receive Status registers of any
   851          * errors they may be reporting.
  852          */
  853         WR4(sc, ETH_TSR, 0xffffffff);
  854         WR4(sc, ETH_RSR, 0xffffffff);
  855 
  856         /*
  857          * XXX TODO(8)
   858          * Need to worry about the busdma resources?  Yes, I think we need
   859          * to sync and unload them.  We may also need to release the mbufs
   860          * that are associated with RX and TX operations.
  861          */
  862 
  863         /*
  864          * XXX we should power down the EMAC if it isn't in use, after
  865          * putting it into loopback mode.  This saves about 400uA according
  866          * to the datasheet.
  867          */
  868 }
  869 
  870 static void
  871 atewatchdog(struct ifnet *ifp)
  872 {
  873         struct ate_softc *sc = ifp->if_softc;
  874 
  875         ATE_LOCK(sc);
  876         device_printf(sc->dev, "Device timeout\n");
  877         ifp->if_oerrors++;
  878         ateinit_locked(sc);
  879         ATE_UNLOCK(sc);
  880 }
  881 
  882 static int
  883 ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  884 {
  885         struct ate_softc *sc = ifp->if_softc;
  886         struct mii_data *mii;
  887         struct ifreq *ifr = (struct ifreq *)data;       
  888         int             error = 0;
  889 
  890         switch (cmd) {
  891         case SIOCSIFFLAGS:
  892                 ATE_LOCK(sc);
  893                 if ((ifp->if_flags & IFF_UP) == 0 &&
  894                     ifp->if_drv_flags & IFF_DRV_RUNNING) {
  895                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  896                         atestop(sc);
  897                 } else {
  898                         /* reinitialize card on any parameter change */
  899                         ateinit_locked(sc);
  900                 }
  901                 ATE_UNLOCK(sc);
  902                 break;
  903 
  904         case SIOCADDMULTI:
  905         case SIOCDELMULTI:
  906                 /* update multicast filter list. */
  907                 ATE_LOCK(sc);
  908                 ate_setmcast(sc);
  909                 ATE_UNLOCK(sc);
  910                 error = 0;
  911                 break;
  912 
  913         case SIOCSIFMEDIA:
  914         case SIOCGIFMEDIA:
  915                 mii = device_get_softc(sc->miibus);
  916                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
  917                 break;
  918         default:
  919                 error = ether_ioctl(ifp, cmd, data);
  920                 break;
  921         }
  922         return (error);
  923 }
  924 
  925 static void
  926 ate_child_detached(device_t dev, device_t child)
  927 {
  928         struct ate_softc *sc;
  929 
  930         sc = device_get_softc(dev);
  931         if (child == sc->miibus)
  932                 sc->miibus = NULL;
  933 }
  934 
  935 /*
  936  * MII bus support routines.
  937  */
  938 static int
  939 ate_miibus_readreg(device_t dev, int phy, int reg)
  940 {
  941         struct ate_softc *sc;
  942         int val;
  943 
  944         /*
   945          * XXX if we implement aggressive power savings, then we need
  946          * XXX to make sure that the clock to the emac is on here
  947          */
  948 
  949         if (phy != 0)
  950                 return (0xffff);
  951         sc = device_get_softc(dev);
  952         DELAY(1);       /* Hangs w/o this delay really 30.5us atm */
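               /*
                * Start the MDIO read and spin until the PHY maintenance logic
                * reports idle, then pick up the result from ETH_MAN.
                */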
  953         WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
  954         while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
  955                 continue;
  956         val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;
  957 
  958         return (val);
  959 }
  960 
  961 static void
  962 ate_miibus_writereg(device_t dev, int phy, int reg, int data)
  963 {
  964         struct ate_softc *sc;
  965         
  966         /*
   967          * XXX if we implement aggressive power savings, then we need
  968          * XXX to make sure that the clock to the emac is on here
  969          */
  970 
  971         sc = device_get_softc(dev);
  972         WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
  973         while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
  974                 continue;
  975         return;
  976 }
  977 
  978 static device_method_t ate_methods[] = {
  979         /* Device interface */
  980         DEVMETHOD(device_probe,         ate_probe),
  981         DEVMETHOD(device_attach,        ate_attach),
  982         DEVMETHOD(device_detach,        ate_detach),
  983 
  984         /* Bus interface */
  985         DEVMETHOD(bus_child_detached,   ate_child_detached),
  986 
  987         /* MII interface */
  988         DEVMETHOD(miibus_readreg,       ate_miibus_readreg),
  989         DEVMETHOD(miibus_writereg,      ate_miibus_writereg),
  990 
  991         { 0, 0 }
  992 };
  993 
  994 static driver_t ate_driver = {
  995         "ate",
  996         ate_methods,
  997         sizeof(struct ate_softc),
  998 };
  999 
 1000 DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0);
 1001 DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0);
 1002 MODULE_DEPEND(ate, miibus, 1, 1, 1);
 1003 MODULE_DEPEND(ate, ether, 1, 1, 1);
