FreeBSD/Linux Kernel Cross Reference
sys/arm/at91/if_ate.c

    1 /*-
    2  * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  *
   13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   18  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   19  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   20  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   22  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   23  */
   24 
   25 /* TODO: (in no order)
   26  *
   27  * 8) Need to sync busdma goo in atestop
   28  * 9) atestop should maybe free the mbufs?
   29  *
   30  * 1) detach
   31  * 2) Free dma setup
   32  * 3) Turn on the clock in pmc?  Turn off?
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/7.3/sys/arm/at91/if_ate.c 187398 2009-01-18 17:07:48Z stas $");
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/bus.h>
   41 #include <sys/kernel.h>
   42 #include <sys/mbuf.h>
   43 #include <sys/malloc.h>
   44 #include <sys/module.h>
   45 #include <sys/rman.h>
   46 #include <sys/socket.h>
   47 #include <sys/sockio.h>
   48 #include <sys/sysctl.h>
   49 #include <machine/bus.h>
   50 
   51 #include <net/ethernet.h>
   52 #include <net/if.h>
   53 #include <net/if_arp.h>
   54 #include <net/if_dl.h>
   55 #include <net/if_media.h>
   56 #include <net/if_mib.h>
   57 #include <net/if_types.h>
   58 
   59 #ifdef INET
   60 #include <netinet/in.h>
   61 #include <netinet/in_systm.h>
   62 #include <netinet/in_var.h>
   63 #include <netinet/ip.h>
   64 #endif
   65 
   66 #include <net/bpf.h>
   67 #include <net/bpfdesc.h>
   68 
   69 #include <dev/mii/mii.h>
   70 #include <dev/mii/miivar.h>
   71 #include <arm/at91/if_atereg.h>
   72 
   73 #include "miibus_if.h"
   74 
   75 #define ATE_MAX_TX_BUFFERS 2            /* We have ping-pong tx buffers */
   76 #define ATE_MAX_RX_BUFFERS 64
   77 
   78 struct ate_softc
   79 {
   80         struct ifnet *ifp;              /* ifnet pointer */
   81         struct mtx sc_mtx;              /* basically a perimeter lock */
   82         device_t dev;                   /* Myself */
   83         device_t miibus;                /* My child miibus */
   84         void *intrhand;                 /* Interrupt handle */
   85         struct resource *irq_res;       /* IRQ resource */
   86         struct resource *mem_res;       /* Memory resource */
   87         struct callout tick_ch;         /* Tick callout */
   88         bus_dma_tag_t mtag;             /* bus dma tag for mbufs */
   89         bus_dmamap_t tx_map[ATE_MAX_TX_BUFFERS];
   90         struct mbuf *sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
   91         bus_dma_tag_t rxtag;
   92         bus_dmamap_t rx_map[ATE_MAX_RX_BUFFERS];
   93         void *rx_buf[ATE_MAX_RX_BUFFERS]; /* RX buffer space */
   94         int rx_buf_ptr;
   95         bus_dma_tag_t rx_desc_tag;
   96         bus_dmamap_t rx_desc_map;
   97         int txcur;                      /* current tx map pointer */
   98         bus_addr_t rx_desc_phys;
   99         eth_rx_desc_t *rx_descs;
  100         int use_rmii;
  101         struct  ifmib_iso_8802_3 mibdata; /* stuff for network mgmt */
  102 };
  103 
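       /* 32-bit register read/write helpers for the EMAC register window. */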
  104 static inline uint32_t
  105 RD4(struct ate_softc *sc, bus_size_t off)
  106 {
  107         return bus_read_4(sc->mem_res, off);
  108 }
  109 
  110 static inline void
  111 WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
  112 {
  113         bus_write_4(sc->mem_res, off, val);
  114 }
  115 
  116 #define ATE_LOCK(_sc)           mtx_lock(&(_sc)->sc_mtx)
  117 #define ATE_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_mtx)
  118 #define ATE_LOCK_INIT(_sc) \
  119         mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
  120             MTX_NETWORK_LOCK, MTX_DEF)
  121 #define ATE_LOCK_DESTROY(_sc)   mtx_destroy(&_sc->sc_mtx);
  122 #define ATE_ASSERT_LOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_OWNED);
  123 #define ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
  124 
  125 static devclass_t ate_devclass;
  126 
  127 /* ifnet entry points */
  128 
  129 static void ateinit_locked(void *);
  130 static void atestart_locked(struct ifnet *);
  131 
  132 static void ateinit(void *);
  133 static void atestart(struct ifnet *);
  134 static void atestop(struct ate_softc *);
  135 static int ateioctl(struct ifnet * ifp, u_long, caddr_t);
  136 
  137 /* bus entry points */
  138 
  139 static int ate_probe(device_t dev);
  140 static int ate_attach(device_t dev);
  141 static int ate_detach(device_t dev);
  142 static void ate_intr(void *);
  143 
  144 /* helper routines */
  145 static int ate_activate(device_t dev);
  146 static void ate_deactivate(device_t dev);
  147 static int ate_ifmedia_upd(struct ifnet *ifp);
  148 static void ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
  149 static int ate_get_mac(struct ate_softc *sc, u_char *eaddr);
  150 static void ate_set_mac(struct ate_softc *sc, u_char *eaddr);
  151 
  152 /*
   153  * The AT91 family of products has an ethernet controller called EMAC.
   154  * However, it isn't self-identifying.  It is anticipated that the parent
   155  * bus code will take care to add ate devices only where they exist.  As
   156  * such, we do nothing here to identify the device and just set its name.
  157  */
  158 static int
  159 ate_probe(device_t dev)
  160 {
  161         device_set_desc(dev, "EMAC");
  162         return (0);
  163 }
  164 
  165 static int
  166 ate_attach(device_t dev)
  167 {
  168         struct ate_softc *sc = device_get_softc(dev);
  169         struct ifnet *ifp = NULL;
  170         struct sysctl_ctx_list *sctx;
  171         struct sysctl_oid *soid;
  172         int err;
  173         u_char eaddr[ETHER_ADDR_LEN];
  174         uint32_t rnd;
  175 
  176         sc->dev = dev;
  177         err = ate_activate(dev);
  178         if (err)
  179                 goto out;
  180 
  181         sc->use_rmii = (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;
  182 
  183         /* Sysctls */
  184         sctx = device_get_sysctl_ctx(dev);
  185         soid = device_get_sysctl_tree(dev);
  186         SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
  187             CTLFLAG_RD, &sc->use_rmii, 0, "rmii in use");
  188 
  189         /* calling atestop before ifp is set is OK */
  190         atestop(sc);
  191         ATE_LOCK_INIT(sc);
  192         callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
  193 
  194         if ((err = ate_get_mac(sc, eaddr)) != 0) {
  195                 /*
   196                  * No MAC address configured.  Generate a random one.
  197                  */
  198                 if  (bootverbose)
  199                         device_printf(dev,
  200                             "Generating random ethernet address.\n");
  201                 rnd = arc4random();
  202 
  203                 /*
  204                  * Set OUI to convenient locally assigned address.  'b'
  205                  * is 0x62, which has the locally assigned bit set, and
  206                  * the broadcast/multicast bit clear.
  207                  */
  208                 eaddr[0] = 'b';
  209                 eaddr[1] = 's';
  210                 eaddr[2] = 'd';
  211                 eaddr[3] = (rnd >> 16) & 0xff;
  212                 eaddr[4] = (rnd >> 8) & 0xff;
  213                 eaddr[5] = rnd & 0xff;
  214         }
  215         ate_set_mac(sc, eaddr);
  216 
  217         sc->ifp = ifp = if_alloc(IFT_ETHER);
  218         if (mii_phy_probe(dev, &sc->miibus, ate_ifmedia_upd, ate_ifmedia_sts)) {
  219                 device_printf(dev, "Cannot find my PHY.\n");
  220                 err = ENXIO;
  221                 goto out;
  222         }
  223 
  224         ifp->if_softc = sc;
  225         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  226         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  227         ifp->if_capabilities |= IFCAP_VLAN_MTU;
  228         ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */
  229         ifp->if_start = atestart;
  230         ifp->if_ioctl = ateioctl;
  231         ifp->if_init = ateinit;
  232         ifp->if_baudrate = 10000000;
  233         IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
  234         ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
  235         IFQ_SET_READY(&ifp->if_snd);
  236         ifp->if_timer = 0;
  237         ifp->if_linkmib = &sc->mibdata;
  238         ifp->if_linkmiblen = sizeof(sc->mibdata);
  239         sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
  240 
  241         ether_ifattach(ifp, eaddr);
  242 
  243         /*
  244          * Activate the interrupt
  245          */
  246         err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
  247             NULL, ate_intr, sc, &sc->intrhand);
  248         if (err) {
  249                 ether_ifdetach(ifp);
  250                 ATE_LOCK_DESTROY(sc);
  251         }
  252 out:;
  253         if (err)
  254                 ate_deactivate(dev);
  255         if (err && ifp)
  256                 if_free(ifp);
  257         return (err);
  258 }
  259 
  260 static int
  261 ate_detach(device_t dev)
  262 {
  263         return EBUSY;   /* XXX TODO(1) */
  264 }
  265 
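       /*
        * bus_dmamap_load() callback: record the bus address of the rx
        * descriptor ring so it can later be written to ETH_RBQP.
        */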
  266 static void
  267 ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  268 {
  269         struct ate_softc *sc;
  270 
  271         if (error != 0)
  272                 return;
  273         sc = (struct ate_softc *)arg;
  274         sc->rx_desc_phys = segs[0].ds_addr;
  275 }
  276 
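       /*
        * bus_dmamap_load() callback: hook the rx buffer that was just
        * loaded into its slot (sc->rx_buf_ptr) of the descriptor ring.
        */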
  277 static void
  278 ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  279 {
  280         struct ate_softc *sc;
  281         int i;
  282 
  283         if (error != 0)
  284                 return;
  285         sc = (struct ate_softc *)arg;
  286         i = sc->rx_buf_ptr;
  287 
  288         /*
  289          * For the last buffer, set the wrap bit so the controller
  290          * restarts from the first descriptor.
  291          */
  292         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
  293         if (i == ATE_MAX_RX_BUFFERS - 1)
  294                 sc->rx_descs[i].addr = segs[0].ds_addr | ETH_WRAP_BIT;
  295         else
  296                 sc->rx_descs[i].addr = segs[0].ds_addr;
  297         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
  298         sc->rx_descs[i].status = 0;
  299         /* Flush the memory in the mbuf */
  300         bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD);
  301 }
  302 
  303 /*
  304  * Compute the multicast filter for this device using the standard
   305  * algorithm.  I wonder why this isn't in ether somewhere, as a lot
   306  * of different MAC chips use either this method or the reversed-bits
   307  * variant of it.
  308  */
  309 static void
  310 ate_setmcast(struct ate_softc *sc)
  311 {
  312         uint32_t index;
  313         uint32_t mcaf[2];
  314         u_char *af = (u_char *) mcaf;
  315         struct ifmultiaddr *ifma;
  316 
  317         mcaf[0] = 0;
  318         mcaf[1] = 0;
  319 
  320         IF_ADDR_LOCK(sc->ifp);
  321         TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
  322                 if (ifma->ifma_addr->sa_family != AF_LINK)
  323                         continue;
  324                 index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  325                     ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
  326                 af[index >> 3] |= 1 << (index & 7);
  327         }
  328         IF_ADDR_UNLOCK(sc->ifp);
  329 
  330         /*
   331          * Write the hash to the hash register.  This card can accept
   332          * unicast packets as well as multicast packets using this
   333          * register, for easier bridging operations, but we don't take
  334          * advantage of that.  Locks here are to avoid LOR with the
  335          * IF_ADDR_LOCK, but might not be strictly necessary.
  336          */
  337         WR4(sc, ETH_HSL, mcaf[0]);
  338         WR4(sc, ETH_HSH, mcaf[1]);
  339 }
  340 
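       /*
        * Allocate the register and IRQ resources along with the busdma
        * tags, maps and buffers used by the transmit and receive paths.
        */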
  341 static int
  342 ate_activate(device_t dev)
  343 {
  344         struct ate_softc *sc;
  345         int rid, err, i;
  346 
  347         sc = device_get_softc(dev);
  348         rid = 0;
  349         sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
  350             RF_ACTIVE);
  351         if (sc->mem_res == NULL)
  352                 goto errout;
  353         rid = 0;
  354         sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
  355             RF_ACTIVE);
  356         if (sc->irq_res == NULL)
  357                 goto errout;
  358 
  359         /*
  360          * Allocate DMA tags and maps
  361          */
  362         err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  363             BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
  364             busdma_lock_mutex, &sc->sc_mtx, &sc->mtag);
  365         if (err != 0)
  366                 goto errout;
  367         for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
  368                 err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]);
  369                 if (err != 0)
  370                         goto errout;
  371         }
   372         /*
   373          * Allocate our Rx buffers.  This chip has an rx descriptor
   374          * structure that's filled in with the address of each buffer.
   375          */
   376 
  377         /*
  378          * Allocate DMA tags and maps for RX.
  379          */
  380         err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  381             BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
  382             busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag);
  383         if (err != 0)
  384                 goto errout;
  385 
   386         /* DMA tag and map for the rx descriptors. */
  387         err = bus_dma_tag_create(NULL, sizeof(eth_rx_desc_t), 0, 
  388             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  389             ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1,
  390             ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
  391             &sc->sc_mtx, &sc->rx_desc_tag);
  392         if (err != 0)
  393                 goto errout;
  394         if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
  395             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
  396                 goto errout;
  397         if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
  398             sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t),
  399             ate_getaddr, sc, 0) != 0)
  400                 goto errout;
  401         /* XXX TODO(5) Put this in ateinit_locked? */
  402         for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
  403                 sc->rx_buf_ptr = i;
  404                 if (bus_dmamem_alloc(sc->rxtag, (void **)&sc->rx_buf[i],
  405                       BUS_DMA_NOWAIT, &sc->rx_map[i]) != 0)
  406                         goto errout;
  407                 if (bus_dmamap_load(sc->rxtag, sc->rx_map[i], sc->rx_buf[i],
  408                     MCLBYTES, ate_load_rx_buf, sc, 0) != 0)
  409                         goto errout;
  410         }
  411         sc->rx_buf_ptr = 0;
  412         /* Flush the memory for the EMAC rx descriptor */
  413         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
  414         /* Write the descriptor queue address. */
  415         WR4(sc, ETH_RBQP, sc->rx_desc_phys);
  416         return (0);
  417 errout:
  418         ate_deactivate(dev);
  419         return (ENOMEM);
  420 }
  421 
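       /* Tear down the interrupt handler, the miibus child and the bus resources. */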
  422 static void
  423 ate_deactivate(device_t dev)
  424 {
  425         struct ate_softc *sc;
  426 
  427         sc = device_get_softc(dev);
  428         /* XXX TODO(2) teardown busdma junk, below from fxp -- customize */
  429 #if 0
  430         if (sc->fxp_mtag) {
  431                 for (i = 0; i < FXP_NRFABUFS; i++) {
  432                         rxp = &sc->fxp_desc.rx_list[i];
  433                         if (rxp->rx_mbuf != NULL) {
  434                                 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
  435                                     BUS_DMASYNC_POSTREAD);
  436                                 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
  437                                 m_freem(rxp->rx_mbuf);
  438                         }
  439                         bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
  440                 }
  441                 bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
  442                 for (i = 0; i < FXP_NTXCB; i++) {
  443                         txp = &sc->fxp_desc.tx_list[i];
  444                         if (txp->tx_mbuf != NULL) {
  445                                 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
  446                                     BUS_DMASYNC_POSTWRITE);
  447                                 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
  448                                 m_freem(txp->tx_mbuf);
  449                         }
  450                         bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
  451                 }
  452                 bus_dma_tag_destroy(sc->fxp_mtag);
  453         }
  454         if (sc->fxp_stag)
  455                 bus_dma_tag_destroy(sc->fxp_stag);
  456         if (sc->cbl_tag)
  457                 bus_dma_tag_destroy(sc->cbl_tag);
  458         if (sc->mcs_tag)
  459                 bus_dma_tag_destroy(sc->mcs_tag);
  460 #endif
  461         if (sc->intrhand)
  462                 bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
  463         sc->intrhand = 0;
  464         bus_generic_detach(sc->dev);
  465         if (sc->miibus)
  466                 device_delete_child(sc->dev, sc->miibus);
  467         if (sc->mem_res)
   468                 bus_release_resource(dev, SYS_RES_MEMORY,
  469                     rman_get_rid(sc->mem_res), sc->mem_res);
  470         sc->mem_res = 0;
  471         if (sc->irq_res)
  472                 bus_release_resource(dev, SYS_RES_IRQ,
  473                     rman_get_rid(sc->irq_res), sc->irq_res);
  474         sc->irq_res = 0;
  475         return;
  476 }
  477 
  478 /*
  479  * Change media according to request.
  480  */
  481 static int
  482 ate_ifmedia_upd(struct ifnet *ifp)
  483 {
  484         struct ate_softc *sc = ifp->if_softc;
  485         struct mii_data *mii;
  486 
  487         mii = device_get_softc(sc->miibus);
  488         ATE_LOCK(sc);
  489         mii_mediachg(mii);
  490         ATE_UNLOCK(sc);
  491         return (0);
  492 }
  493 
  494 /*
  495  * Notify the world which media we're using.
  496  */
  497 static void
  498 ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
  499 {
  500         struct ate_softc *sc = ifp->if_softc;
  501         struct mii_data *mii;
  502 
  503         mii = device_get_softc(sc->miibus);
  504         ATE_LOCK(sc);
  505         mii_pollstat(mii);
  506         ifmr->ifm_active = mii->mii_media_active;
  507         ifmr->ifm_status = mii->mii_media_status;
  508         ATE_UNLOCK(sc);
  509 }
  510 
  511 static void
  512 ate_stat_update(struct ate_softc *sc, int active)
  513 {
  514         /*
  515          * The speed and full/half-duplex state needs to be reflected
  516          * in the ETH_CFG register.
  517          */
  518         if (IFM_SUBTYPE(active) == IFM_10_T)
  519                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_SPD);
  520         else
  521                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_SPD);
  522         if (active & IFM_FDX)
  523                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_FD);
  524         else
  525                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_FD);
  526 }
  527 
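       /*
        * Once-a-second callout: track media changes via the MII and fold
        * the hardware statistics counters into the MIB and ifnet counters.
        */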
  528 static void
  529 ate_tick(void *xsc)
  530 {
  531         struct ate_softc *sc = xsc;
  532         struct ifnet *ifp = sc->ifp;
  533         struct mii_data *mii;
  534         int active;
  535         uint32_t c;
  536 
  537         /*
   538          * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and, if that
   539          * bit is clear, asks the MII whether there's a link.  Not sure if we
  540          * should do the same thing here or not.
  541          */
  542         ATE_ASSERT_LOCKED(sc);
  543         if (sc->miibus != NULL) {
  544                 mii = device_get_softc(sc->miibus);
  545                 active = mii->mii_media_active;
  546                 mii_tick(mii);
  547                 if (mii->mii_media_status & IFM_ACTIVE &&
  548                      active != mii->mii_media_active)
  549                         ate_stat_update(sc, mii->mii_media_active);
  550         }
  551 
  552         /*
  553          * Update the stats as best we can.  When we're done, clear
  554          * the status counters and start over.  We're supposed to read these
  555          * registers often enough that they won't overflow.  Hopefully
  556          * once a second is often enough.  Some don't map well to
  557          * the dot3Stats mib, so for those we just count them as general
   558          * errors.  Stats for iframes, ibytes, oframes and obytes are
  559          * collected elsewhere.  These registers zero on a read to prevent
  560          * races.  For all the collision stats, also update the collision
  561          * stats for the interface.
  562          */
  563         sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
  564         sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
  565         c = RD4(sc, ETH_SCOL);
  566         ifp->if_collisions += c;
  567         sc->mibdata.dot3StatsSingleCollisionFrames += c;
  568         c = RD4(sc, ETH_MCOL);
  569         sc->mibdata.dot3StatsMultipleCollisionFrames += c;
  570         ifp->if_collisions += c;
  571         sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
  572         sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
  573         c = RD4(sc, ETH_LCOL);
  574         sc->mibdata.dot3StatsLateCollisions += c;
  575         ifp->if_collisions += c;
  576         c = RD4(sc, ETH_ECOL);
  577         sc->mibdata.dot3StatsExcessiveCollisions += c;
  578         ifp->if_collisions += c;
  579         sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
  580         sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
  581         sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);
  582         /*
   583          * Not sure where to lump these, so count them against the errors
  584          * for the interface.
  585          */
  586         sc->ifp->if_oerrors += RD4(sc, ETH_TUE);
  587         sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
  588             RD4(sc, ETH_USF);
  589 
  590         /*
  591          * Schedule another timeout one second from now.
  592          */
  593         callout_reset(&sc->tick_ch, hz, ate_tick, sc);
  594 }
  595 
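       /* Program the primary station address (SA1) registers. */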
  596 static void
  597 ate_set_mac(struct ate_softc *sc, u_char *eaddr)
  598 {
  599         WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
  600             (eaddr[1] << 8) | eaddr[0]);
  601         WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
  602 }
  603 
  604 static int
  605 ate_get_mac(struct ate_softc *sc, u_char *eaddr)
  606 {
  607         bus_size_t sa_low_reg[] = { ETH_SA1L, ETH_SA2L, ETH_SA3L, ETH_SA4L };
  608         bus_size_t sa_high_reg[] = { ETH_SA1H, ETH_SA2H, ETH_SA3H, ETH_SA4H };
  609         uint32_t low, high;
  610         int i;
  611 
  612         /*
   613          * The boot loader sets up the MAC with an address, if one is set in
  614          * the loader. Grab one MAC address from the SA[1-4][HL] registers.
  615          */
  616         for (i = 0; i < 4; i++) {
  617                 low = RD4(sc, sa_low_reg[i]);
  618                 high = RD4(sc, sa_high_reg[i]);
  619                 if ((low | (high & 0xffff)) != 0) {
  620                         eaddr[0] = low & 0xff;
  621                         eaddr[1] = (low >> 8) & 0xff;
  622                         eaddr[2] = (low >> 16) & 0xff;
  623                         eaddr[3] = (low >> 24) & 0xff;
  624                         eaddr[4] = high & 0xff;
  625                         eaddr[5] = (high >> 8) & 0xff;
  626                         return (0);
  627                 }
  628         }
  629         return (ENXIO);
  630 }
  631 
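       /*
        * Interrupt handler: pass received frames up the stack, reclaim
        * completed transmit buffers, and apply the RBNA errata workaround.
        */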
  632 static void
  633 ate_intr(void *xsc)
  634 {
  635         struct ate_softc *sc = xsc;
  636         struct ifnet *ifp = sc->ifp;
  637         int status;
  638         int i;
  639         void *bp;
  640         struct mbuf *mb;
  641         uint32_t rx_stat;
  642 
  643         status = RD4(sc, ETH_ISR);
  644         if (status == 0)
  645                 return;
  646         if (status & ETH_ISR_RCOM) {
  647                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  648                     BUS_DMASYNC_POSTREAD);
  649                 while (sc->rx_descs[sc->rx_buf_ptr].addr & ETH_CPU_OWNER) {
  650                         i = sc->rx_buf_ptr;
  651                         sc->rx_buf_ptr = (i + 1) % ATE_MAX_RX_BUFFERS;
  652                         bp = sc->rx_buf[i];
  653                         rx_stat = sc->rx_descs[i].status;
  654                         if ((rx_stat & ETH_LEN_MASK) == 0) {
  655                                 printf("ignoring bogus 0 len packet\n");
  656                                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  657                                     BUS_DMASYNC_PREWRITE);
  658                                 sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
  659                                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  660                                     BUS_DMASYNC_POSTWRITE);
  661                                 continue;
  662                         }
  663                         /* Flush memory for mbuf so we don't get stale bytes */
  664                         bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
  665                             BUS_DMASYNC_POSTREAD);
  666                         WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));
  667 
  668                         /*
  669                          * The length returned by the device includes the
  670                          * ethernet CRC calculation for the packet, but
  671                          * ifnet drivers are supposed to discard it.
  672                          */
  673                         mb = m_devget(sc->rx_buf[i],
  674                             (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN,
  675                             ETHER_ALIGN, ifp, NULL);
  676                         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  677                             BUS_DMASYNC_PREWRITE);
  678                         sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
  679                         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  680                             BUS_DMASYNC_POSTWRITE);
  681                         bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
  682                             BUS_DMASYNC_PREREAD);
  683                         if (mb != NULL) {
  684                                 ifp->if_ipackets++;
  685                                 (*ifp->if_input)(ifp, mb);
  686                         }
  687                         
  688                 }
  689         }
  690         if (status & ETH_ISR_TCOM) {
  691                 ATE_LOCK(sc);
  692                 /* XXX TSR register should be cleared */
  693                 if (sc->sent_mbuf[0]) {
  694                         bus_dmamap_sync(sc->mtag, sc->tx_map[0],
  695                             BUS_DMASYNC_POSTWRITE);
  696                         m_freem(sc->sent_mbuf[0]);
  697                         ifp->if_opackets++;
  698                         sc->sent_mbuf[0] = NULL;
  699                 }
  700                 if (sc->sent_mbuf[1]) {
  701                         if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
  702                                 bus_dmamap_sync(sc->mtag, sc->tx_map[1],
  703                                     BUS_DMASYNC_POSTWRITE);
  704                                 m_freem(sc->sent_mbuf[1]);
  705                                 ifp->if_opackets++;
  706                                 sc->txcur = 0;
  707                                 sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
  708                         } else {
  709                                 sc->sent_mbuf[0] = sc->sent_mbuf[1];
  710                                 sc->sent_mbuf[1] = NULL;
  711                                 sc->txcur = 1;
  712                         }
  713                 } else {
  714                         sc->sent_mbuf[0] = NULL;
  715                         sc->txcur = 0;
  716                 }
  717                 /*
  718                  * We're no longer busy, so clear the busy flag and call the
  719                  * start routine to xmit more packets.
  720                  */
  721                 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  722                 atestart_locked(sc->ifp);
  723                 ATE_UNLOCK(sc);
  724         }
  725         if (status & ETH_ISR_RBNA) {
  726                 printf("RBNA workaround\n");
  727                 /* Workaround Errata #11 */
  728                 WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) &~ ETH_CTL_RE);
  729                 WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_RE);
  730         }
  731 }
  732 
  733 /*
  734  * Reset and initialize the chip
  735  */
  736 static void
  737 ateinit_locked(void *xsc)
  738 {
  739         struct ate_softc *sc = xsc;
  740         struct ifnet *ifp = sc->ifp;
  741         struct mii_data *mii;
  742 
  743         ATE_ASSERT_LOCKED(sc);
  744 
  745         /*
  746          * XXX TODO(3)
  747          * we need to turn on the EMAC clock in the pmc.  With the
  748          * default boot loader, this is already turned on.  However, we
  749          * need to think about how best to turn it on/off as the interface
  750          * is brought up/down, as well as dealing with the mii bus...
  751          *
  752          * We also need to multiplex the pins correctly.
  753          */
  754 
  755         /*
  756          * There are two different ways that the mii bus is connected
  757          * to this chip.  Select the right one based on a compile-time
  758          * option.
  759          */
  760         if (sc->use_rmii)
  761                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_RMII);
  762         else
  763                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_RMII);
  764 
  765         /*
  766          * Turn on the multicast hash, and write 0's to it.
  767          */
  768         WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_MTI);
  769         WR4(sc, ETH_HSH, 0);
  770         WR4(sc, ETH_HSL, 0);
  771 
  772         WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
  773         WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);
  774 
  775         /*
  776          * Boot loader fills in MAC address.  If that's not the case, then
  777          * we should set SA1L and SA1H here to the appropriate value.  Note:
  778          * the byte order is big endian, not little endian, so we have some
  779          * swapping to do.  Again, if we need it (which I don't think we do).
  780          */
  781         ate_setmcast(sc);
  782 
  783         /* enable big packets */
  784         WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
  785 
  786         /*
  787          * Set 'running' flag, and clear output active flag
  788          * and attempt to start the output
  789          */
  790         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  791         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  792 
  793         mii = device_get_softc(sc->miibus);
  794         mii_pollstat(mii);
  795         ate_stat_update(sc, mii->mii_media_active);
  796         atestart_locked(ifp);
  797 
  798         callout_reset(&sc->tick_ch, hz, ate_tick, sc);
  799 }
  800 
  801 /*
   802  * Dequeue packets and transmit them.
  803  */
  804 static void
  805 atestart_locked(struct ifnet *ifp)
  806 {
  807         struct ate_softc *sc = ifp->if_softc;
  808         struct mbuf *m, *mdefrag;
  809         bus_dma_segment_t segs[1];
  810         int nseg, e;
  811 
  812         ATE_ASSERT_LOCKED(sc);
  813         if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
  814                 return;
  815 
  816         while (sc->txcur < ATE_MAX_TX_BUFFERS) {
  817                 /*
  818                  * check to see if there's room to put another packet into the
  819                  * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
  820                  * packets.  We use OACTIVE to indicate "we can stuff more into
  821                  * our buffers (clear) or not (set)."
  822                  */
  823                 if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
  824                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  825                         return;
  826                 }
  827                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
  828                 if (m == 0) {
  829                         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  830                         return;
  831                 }
  832                 e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
  833                     segs, &nseg, 0);
  834                 if (e == EFBIG) {
  835                         mdefrag = m_defrag(m, M_DONTWAIT);
  836                         if (mdefrag == NULL) {
  837                                 IFQ_DRV_PREPEND(&ifp->if_snd, m);
  838                                 return;
  839                         }
  840                         m = mdefrag;
  841                         e = bus_dmamap_load_mbuf_sg(sc->mtag,
  842                             sc->tx_map[sc->txcur], m, segs, &nseg, 0);
  843                 }
  844                 if (e != 0) {
  845                         m_freem(m);
  846                         continue;
  847                 }
  848                 bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
  849                     BUS_DMASYNC_PREWRITE);
  850 
  851                 /*
  852                  * tell the hardware to xmit the packet.
  853                  */
  854                 WR4(sc, ETH_TAR, segs[0].ds_addr);
  855                 WR4(sc, ETH_TCR, segs[0].ds_len);
  856         
  857                 /*
  858                  * Tap off here if there is a bpf listener.
  859                  */
  860                 BPF_MTAP(ifp, m);
  861 
  862                 sc->sent_mbuf[sc->txcur] = m;
  863                 sc->txcur++;
  864         }
  865 }
  866 
  867 static void
  868 ateinit(void *xsc)
  869 {
  870         struct ate_softc *sc = xsc;
  871         ATE_LOCK(sc);
  872         ateinit_locked(sc);
  873         ATE_UNLOCK(sc);
  874 }
  875 
  876 static void
  877 atestart(struct ifnet *ifp)
  878 {
  879         struct ate_softc *sc = ifp->if_softc;
  880         ATE_LOCK(sc);
  881         atestart_locked(ifp);
  882         ATE_UNLOCK(sc);
  883 }
  884 
  885 /*
  886  * Turn off interrupts, and stop the nic.  Can be called with sc->ifp NULL
  887  * so be careful.
  888  */
  889 static void
  890 atestop(struct ate_softc *sc)
  891 {
  892         struct ifnet *ifp = sc->ifp;
  893 
  894         if (ifp) {
  895                 ifp->if_timer = 0;
  896                 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  897         }
  898 
  899         callout_stop(&sc->tick_ch);
  900 
  901         /*
   902          * Enable some parts of the MAC that are always needed (like the
   903          * MII bus).  This turns off the RE and TE bits, which will remain
  904          * off until ateinit() is called to turn them on.  With RE and TE
  905          * turned off, there's no DMA to worry about after this write.
  906          */
  907         WR4(sc, ETH_CTL, ETH_CTL_MPE);
  908 
  909         /*
  910          * Turn off all the configured options and revert to defaults.
  911          */
  912         WR4(sc, ETH_CFG, ETH_CFG_CLK_32);
  913 
  914         /*
  915          * Turn off all the interrupts, and ack any pending ones by reading
  916          * the ISR.
  917          */
  918         WR4(sc, ETH_IDR, 0xffffffff);
  919         RD4(sc, ETH_ISR);
  920 
  921         /*
   922          * Clear out the Transmit and Receive Status registers of any
   923          * errors they may be reporting.
  924          */
  925         WR4(sc, ETH_TSR, 0xffffffff);
  926         WR4(sc, ETH_RSR, 0xffffffff);
  927 
  928         /*
  929          * XXX TODO(8)
  930          * need to worry about the busdma resources?  Yes, I think we need
  931          * to sync and unload them.  We may also need to release the mbufs
   932          * that are associated with RX and TX operations.
  933          */
  934 
  935         /*
  936          * XXX we should power down the EMAC if it isn't in use, after
  937          * putting it into loopback mode.  This saves about 400uA according
  938          * to the datasheet.
  939          */
  940 }
  941 
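       /*
        * Handle ifnet ioctls: interface up/down, multicast filter updates,
        * media and capability changes; everything else goes to ether_ioctl().
        */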
  942 static int
  943 ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  944 {
  945         struct ate_softc *sc = ifp->if_softc;
  946         struct mii_data *mii;
  947         struct ifreq *ifr = (struct ifreq *)data;       
  948         int mask, error = 0;
  949 
  950         switch (cmd) {
  951         case SIOCSIFFLAGS:
  952                 ATE_LOCK(sc);
  953                 if ((ifp->if_flags & IFF_UP) == 0 &&
  954                     ifp->if_drv_flags & IFF_DRV_RUNNING) {
  955                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  956                         atestop(sc);
  957                 } else {
  958                         /* reinitialize card on any parameter change */
  959                         ateinit_locked(sc);
  960                 }
  961                 ATE_UNLOCK(sc);
  962                 break;
  963 
  964         case SIOCADDMULTI:
  965         case SIOCDELMULTI:
  966                 /* update multicast filter list. */
  967                 ATE_LOCK(sc);
  968                 ate_setmcast(sc);
  969                 ATE_UNLOCK(sc);
  970                 error = 0;
  971                 break;
  972 
  973         case SIOCSIFMEDIA:
  974         case SIOCGIFMEDIA:
  975                 mii = device_get_softc(sc->miibus);
  976                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
  977                 break;
  978         case SIOCSIFCAP:
  979                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
  980                 if (mask & IFCAP_VLAN_MTU) {
  981                         ATE_LOCK(sc);
  982                         if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
  983                                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
  984                                 ifp->if_capenable |= IFCAP_VLAN_MTU;
  985                         } else {
  986                                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
  987                                 ifp->if_capenable &= ~IFCAP_VLAN_MTU;
  988                         }
  989                         ATE_UNLOCK(sc);
  990                 }
  991         default:
  992                 error = ether_ioctl(ifp, cmd, data);
  993                 break;
  994         }
  995         return (error);
  996 }
  997 
  998 static void
  999 ate_child_detached(device_t dev, device_t child)
 1000 {
 1001         struct ate_softc *sc;
 1002 
 1003         sc = device_get_softc(dev);
 1004         if (child == sc->miibus)
 1005                 sc->miibus = NULL;
 1006 }
 1007 
 1008 /*
 1009  * MII bus support routines.
 1010  */
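       /*
        * PHY registers are accessed through the ETH_MAN (PHY maintenance)
        * register; the ETH_SR_IDLE bit is polled until the operation completes.
        */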
 1011 static int
 1012 ate_miibus_readreg(device_t dev, int phy, int reg)
 1013 {
 1014         struct ate_softc *sc;
 1015         int val;
 1016 
 1017         /*
  1018          * XXX if we implement aggressive power savings, then we need
 1019          * XXX to make sure that the clock to the emac is on here
 1020          */
 1021 
 1022         sc = device_get_softc(dev);
  1023         DELAY(1);       /* Hangs w/o this delay; really 30.5us at the moment */
 1024         WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
 1025         while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
 1026                 continue;
 1027         val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;
 1028 
 1029         return (val);
 1030 }
 1031 
 1032 static void
 1033 ate_miibus_writereg(device_t dev, int phy, int reg, int data)
 1034 {
 1035         struct ate_softc *sc;
 1036         
 1037         /*
  1038          * XXX if we implement aggressive power savings, then we need
 1039          * XXX to make sure that the clock to the emac is on here
 1040          */
 1041 
 1042         sc = device_get_softc(dev);
 1043         WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
 1044         while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
 1045                 continue;
 1046         return;
 1047 }
 1048 
 1049 static device_method_t ate_methods[] = {
 1050         /* Device interface */
 1051         DEVMETHOD(device_probe,         ate_probe),
 1052         DEVMETHOD(device_attach,        ate_attach),
 1053         DEVMETHOD(device_detach,        ate_detach),
 1054 
 1055         /* Bus interface */
 1056         DEVMETHOD(bus_child_detached,   ate_child_detached),
 1057 
 1058         /* MII interface */
 1059         DEVMETHOD(miibus_readreg,       ate_miibus_readreg),
 1060         DEVMETHOD(miibus_writereg,      ate_miibus_writereg),
 1061 
 1062         { 0, 0 }
 1063 };
 1064 
 1065 static driver_t ate_driver = {
 1066         "ate",
 1067         ate_methods,
 1068         sizeof(struct ate_softc),
 1069 };
 1070 
 1071 DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0);
 1072 DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0);
 1073 MODULE_DEPEND(ate, miibus, 1, 1, 1);
 1074 MODULE_DEPEND(ate, ether, 1, 1, 1);

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.