FreeBSD/Linux Kernel Cross Reference
sys/arm/at91/if_ate.c

    1 /*-
    2  * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  *
   13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   18  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   19  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   20  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   22  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   23  */
   24 
   25 /* TODO: (in no order)
   26  *
   27  * 8) Need to sync busdma goo in atestop
   28  * 9) atestop should maybe free the mbufs?
   29  *
   30  * 1) detach
   31  * 2) Free dma setup
   32  * 3) Turn on the clock in pmc?  Turn off?
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD$");
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/bus.h>
   41 #include <sys/kernel.h>
   42 #include <sys/mbuf.h>
   43 #include <sys/malloc.h>
   44 #include <sys/module.h>
   45 #include <sys/rman.h>
   46 #include <sys/socket.h>
   47 #include <sys/sockio.h>
   48 #include <sys/sysctl.h>
   49 #include <machine/bus.h>
   50 
   51 #include <net/ethernet.h>
   52 #include <net/if.h>
   53 #include <net/if_arp.h>
   54 #include <net/if_dl.h>
   55 #include <net/if_media.h>
   56 #include <net/if_mib.h>
   57 #include <net/if_types.h>
   58 
   59 #ifdef INET
   60 #include <netinet/in.h>
   61 #include <netinet/in_systm.h>
   62 #include <netinet/in_var.h>
   63 #include <netinet/ip.h>
   64 #endif
   65 
   66 #include <net/bpf.h>
   67 #include <net/bpfdesc.h>
   68 
   69 #include <dev/mii/mii.h>
   70 #include <dev/mii/miivar.h>
   71 #include <arm/at91/if_atereg.h>
   72 
   73 #include "miibus_if.h"
   74 
   75 #define ATE_MAX_TX_BUFFERS 2            /* We have ping-pong tx buffers */
   76 #define ATE_MAX_RX_BUFFERS 64
   77 
   78 struct ate_softc
   79 {
   80         struct ifnet *ifp;              /* ifnet pointer */
   81         struct mtx sc_mtx;              /* basically a perimeter lock */
   82         device_t dev;                   /* Myself */
   83         device_t miibus;                /* My child miibus */
   84         void *intrhand;                 /* Interrupt handle */
   85         struct resource *irq_res;       /* IRQ resource */
   86         struct resource *mem_res;       /* Memory resource */
   87         struct callout tick_ch;         /* Tick callout */
   88         bus_dma_tag_t mtag;             /* bus dma tag for mbufs */
   89         bus_dmamap_t tx_map[ATE_MAX_TX_BUFFERS];
   90         struct mbuf *sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
   91         bus_dma_tag_t rxtag;
   92         bus_dmamap_t rx_map[ATE_MAX_RX_BUFFERS];
   93         void *rx_buf[ATE_MAX_RX_BUFFERS]; /* RX buffer space */
   94         int rx_buf_ptr;
   95         bus_dma_tag_t rx_desc_tag;
   96         bus_dmamap_t rx_desc_map;
   97         int txcur;                      /* current tx map pointer */
   98         bus_addr_t rx_desc_phys;
   99         eth_rx_desc_t *rx_descs;
  100         int use_rmii;
  101         struct  ifmib_iso_8802_3 mibdata; /* stuff for network mgmt */
  102 };
  103 
  104 static inline uint32_t
  105 RD4(struct ate_softc *sc, bus_size_t off)
  106 {
  107         return bus_read_4(sc->mem_res, off);
  108 }
  109 
  110 static inline void
  111 WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
  112 {
  113         bus_write_4(sc->mem_res, off, val);
  114 }
  115 
  116 #define ATE_LOCK(_sc)           mtx_lock(&(_sc)->sc_mtx)
  117 #define ATE_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_mtx)
  118 #define ATE_LOCK_INIT(_sc) \
  119         mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
  120             MTX_NETWORK_LOCK, MTX_DEF)
  121 #define ATE_LOCK_DESTROY(_sc)   mtx_destroy(&_sc->sc_mtx);
  122 #define ATE_ASSERT_LOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_OWNED);
  123 #define ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
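      /*
       * Annotation: sc_mtx is a single per-device perimeter lock; the
       * ATE_LOCK/UNLOCK/ASSERT macros above wrap it, and the tick callout
       * is created with callout_init_mtx() so it runs with the lock held.
       */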
  124 
  125 static devclass_t ate_devclass;
  126 
  127 /* ifnet entry points */
  128 
  129 static void ateinit_locked(void *);
  130 static void atestart_locked(struct ifnet *);
  131 
  132 static void ateinit(void *);
  133 static void atestart(struct ifnet *);
  134 static void atestop(struct ate_softc *);
  135 static int ateioctl(struct ifnet * ifp, u_long, caddr_t);
  136 
  137 /* bus entry points */
  138 
  139 static int ate_probe(device_t dev);
  140 static int ate_attach(device_t dev);
  141 static int ate_detach(device_t dev);
  142 static void ate_intr(void *);
  143 
  144 /* helper routines */
  145 static int ate_activate(device_t dev);
  146 static void ate_deactivate(device_t dev);
  147 static int ate_ifmedia_upd(struct ifnet *ifp);
  148 static void ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
  149 static int ate_get_mac(struct ate_softc *sc, u_char *eaddr);
  150 static void ate_set_mac(struct ate_softc *sc, u_char *eaddr);
  151 
  152 /*
  153  * The AT91 family of products calls its ethernet controller the EMAC.
  154  * However, it isn't self-identifying.  It is anticipated that the parent
  155  * bus code will take care to add ate devices only where they really exist.
  156  * As such, we do nothing here to identify the device and just set its description.
  157  */
  158 static int
  159 ate_probe(device_t dev)
  160 {
  161         device_set_desc(dev, "EMAC");
  162         return (0);
  163 }
  164 
  165 static int
  166 ate_attach(device_t dev)
  167 {
  168         struct ate_softc *sc = device_get_softc(dev);
  169         struct ifnet *ifp = NULL;
  170         struct sysctl_ctx_list *sctx;
  171         struct sysctl_oid *soid;
  172         int err;
  173         u_char eaddr[ETHER_ADDR_LEN];
  174         uint32_t rnd;
  175 
  176         sc->dev = dev;
  177         err = ate_activate(dev);
  178         if (err)
  179                 goto out;
  180 
  181         sc->use_rmii = (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;
  182 
  183         /* Sysctls */
  184         sctx = device_get_sysctl_ctx(dev);
  185         soid = device_get_sysctl_tree(dev);
  186         SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
  187             CTLFLAG_RD, &sc->use_rmii, 0, "rmii in use");
  188 
  189         /* calling atestop before ifp is set is OK */
  190         atestop(sc);
  191         ATE_LOCK_INIT(sc);
  192         callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
  193 
  194         if ((err = ate_get_mac(sc, eaddr)) != 0) {
  195                 /*
  196                  * No MAC address configured.  Generate a random one.
  197                  */
  198                 if  (bootverbose)
  199                         device_printf(dev,
  200                             "Generating random ethernet address.\n");
  201                 rnd = arc4random();
  202 
  203                 /*
  204                  * Set the OUI to a convenient locally assigned address.  'b'
  205                  * is 0x62, which has the locally assigned bit set, and
  206                  * the broadcast/multicast bit clear.
  207                  */
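                      /*
                       * Annotation: bit 0 (0x01) of the first address octet
                       * is the group/multicast bit and bit 1 (0x02) is the
                       * locally-administered bit; 0x62 is 01100010 in binary,
                       * so the multicast bit is clear and the local bit set.
                       */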
  208                 eaddr[0] = 'b';
  209                 eaddr[1] = 's';
  210                 eaddr[2] = 'd';
  211                 eaddr[3] = (rnd >> 16) & 0xff;
  212                 eaddr[4] = (rnd >> 8) & 0xff;
  213                 eaddr[5] = rnd & 0xff;
  214         }
  215         ate_set_mac(sc, eaddr);
  216 
  217         sc->ifp = ifp = if_alloc(IFT_ETHER);
  218         err = mii_attach(dev, &sc->miibus, ifp, ate_ifmedia_upd,
  219             ate_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
  220         if (err != 0) {
  221                 device_printf(dev, "attaching PHYs failed\n");
  222                 goto out;
  223         }
  224 
  225         ifp->if_softc = sc;
  226         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
  227         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  228         ifp->if_capabilities |= IFCAP_VLAN_MTU;
  229         ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */
  230         ifp->if_start = atestart;
  231         ifp->if_ioctl = ateioctl;
  232         ifp->if_init = ateinit;
  233         ifp->if_baudrate = 10000000;
  234         IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
  235         ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
  236         IFQ_SET_READY(&ifp->if_snd);
  237         ifp->if_timer = 0;
  238         ifp->if_linkmib = &sc->mibdata;
  239         ifp->if_linkmiblen = sizeof(sc->mibdata);
  240         sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
  241 
  242         ether_ifattach(ifp, eaddr);
  243 
  244         /*
  245          * Activate the interrupt
  246          */
  247         err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
  248             NULL, ate_intr, sc, &sc->intrhand);
  249         if (err) {
  250                 ether_ifdetach(ifp);
  251                 ATE_LOCK_DESTROY(sc);
  252         }
  253 out:;
  254         if (err)
  255                 ate_deactivate(dev);
  256         if (err && ifp)
  257                 if_free(ifp);
  258         return (err);
  259 }
  260 
  261 static int
  262 ate_detach(device_t dev)
  263 {
  264         return EBUSY;   /* XXX TODO(1) */
  265 }
  266 
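      /*
       * Annotation: busdma callback used when loading the RX descriptor
       * ring; it simply records the ring's bus address so that it can
       * later be written to the ETH_RBQP register in ate_activate().
       */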
  267 static void
  268 ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  269 {
  270         struct ate_softc *sc;
  271 
  272         if (error != 0)
  273                 return;
  274         sc = (struct ate_softc *)arg;
  275         sc->rx_desc_phys = segs[0].ds_addr;
  276 }
  277 
  278 static void
  279 ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
  280 {
  281         struct ate_softc *sc;
  282         int i;
  283 
  284         if (error != 0)
  285                 return;
  286         sc = (struct ate_softc *)arg;
  287         i = sc->rx_buf_ptr;
  288 
  289         /*
  290          * For the last buffer, set the wrap bit so the controller
  291          * restarts from the first descriptor.
  292          */
  293         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
  294         if (i == ATE_MAX_RX_BUFFERS - 1)
  295                 sc->rx_descs[i].addr = segs[0].ds_addr | ETH_WRAP_BIT;
  296         else
  297                 sc->rx_descs[i].addr = segs[0].ds_addr;
  298         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
  299         sc->rx_descs[i].status = 0;
  300         /* Flush the memory in the mbuf */
  301         bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD);
  302 }
  303 
  304 /*
  305  * Compute the multicast filter for this device using the standard
  306  * algorithm.  I wonder why this isn't in ether somewhere, as a lot
  307  * of different MAC chips use this method (or the variant with the
  308  * bits reversed).
  309  */
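      /*
       * Annotation (worked example of the hash): for each link-level
       * multicast address, take the big-endian CRC-32 of the 6-byte MAC
       * address and keep its top 6 bits (the ">> 26"), giving an index of
       * 0..63 into a 64-bit hash.  af[index >> 3] selects the byte and
       * (index & 7) the bit within it; mcaf[0] is then written to ETH_HSL
       * (the low word) and mcaf[1] to ETH_HSH (the high word).
       */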
  310 static void
  311 ate_setmcast(struct ate_softc *sc)
  312 {
  313         uint32_t index;
  314         uint32_t mcaf[2];
  315         u_char *af = (u_char *) mcaf;
  316         struct ifmultiaddr *ifma;
  317 
  318         mcaf[0] = 0;
  319         mcaf[1] = 0;
  320 
  321         IF_ADDR_LOCK(sc->ifp);
  322         TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
  323                 if (ifma->ifma_addr->sa_family != AF_LINK)
  324                         continue;
  325                 index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
  326                     ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
  327                 af[index >> 3] |= 1 << (index & 7);
  328         }
  329         IF_ADDR_UNLOCK(sc->ifp);
  330 
  331         /*
  332          * Write the hash to the hash register.  This card can accept
  333          * unicast as well as multicast packets through this register,
  334          * which makes bridging operations easier, but we don't take
  335          * advantage of that.  Locks here are to avoid LOR with the
  336          * IF_ADDR_LOCK, but might not be strictly necessary.
  337          */
  338         WR4(sc, ETH_HSL, mcaf[0]);
  339         WR4(sc, ETH_HSH, mcaf[1]);
  340 }
  341 
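      /*
       * Annotation: ate_activate() claims the register window and IRQ,
       * creates the busdma tags and maps for the TX mbufs and RX buffers,
       * builds the ATE_MAX_RX_BUFFERS-entry RX descriptor ring (the last
       * descriptor gets ETH_WRAP_BIT so the EMAC loops back to the first),
       * and finally points the hardware at the ring through ETH_RBQP.
       */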
  342 static int
  343 ate_activate(device_t dev)
  344 {
  345         struct ate_softc *sc;
  346         int rid, err, i;
  347 
  348         sc = device_get_softc(dev);
  349         rid = 0;
  350         sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
  351             RF_ACTIVE);
  352         if (sc->mem_res == NULL)
  353                 goto errout;
  354         rid = 0;
  355         sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
  356             RF_ACTIVE);
  357         if (sc->irq_res == NULL)
  358                 goto errout;
  359 
  360         /*
  361          * Allocate DMA tags and maps
  362          */
  363         err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  364             BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
  365             busdma_lock_mutex, &sc->sc_mtx, &sc->mtag);
  366         if (err != 0)
  367                 goto errout;
  368         for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
  369                 err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]);
  370                 if (err != 0)
  371                         goto errout;
  372         }
  373         /*
  374          * Allocate our RX buffers.  This chip has an RX descriptor ring
  375          * that the hardware fills in as packets arrive.
  376          */
  377 
  378         /*
  379          * Allocate DMA tags and maps for RX.
  380          */
  381         err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  382             BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
  383             busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag);
  384         if (err != 0)
  385                 goto errout;
  386 
  387         /* Dma TAG and MAP for the rx descriptors. */
  388         err = bus_dma_tag_create(NULL, sizeof(eth_rx_desc_t), 0, 
  389             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  390             ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1,
  391             ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
  392             &sc->sc_mtx, &sc->rx_desc_tag);
  393         if (err != 0)
  394                 goto errout;
  395         if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
  396             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
  397                 goto errout;
  398         if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
  399             sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t),
  400             ate_getaddr, sc, 0) != 0)
  401                 goto errout;
  402         /* XXX TODO(5) Put this in ateinit_locked? */
  403         for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
  404                 sc->rx_buf_ptr = i;
  405                 if (bus_dmamem_alloc(sc->rxtag, (void **)&sc->rx_buf[i],
  406                       BUS_DMA_NOWAIT, &sc->rx_map[i]) != 0)
  407                         goto errout;
  408                 if (bus_dmamap_load(sc->rxtag, sc->rx_map[i], sc->rx_buf[i],
  409                     MCLBYTES, ate_load_rx_buf, sc, 0) != 0)
  410                         goto errout;
  411         }
  412         sc->rx_buf_ptr = 0;
  413         /* Flush the memory for the EMAC rx descriptor */
  414         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
  415         /* Write the descriptor queue address. */
  416         WR4(sc, ETH_RBQP, sc->rx_desc_phys);
  417         return (0);
  418 errout:
  419         ate_deactivate(dev);
  420         return (ENOMEM);
  421 }
  422 
  423 static void
  424 ate_deactivate(device_t dev)
  425 {
  426         struct ate_softc *sc;
  427 
  428         sc = device_get_softc(dev);
  429         /* XXX TODO(2) teardown busdma junk, below from fxp -- customize */
  430 #if 0
  431         if (sc->fxp_mtag) {
  432                 for (i = 0; i < FXP_NRFABUFS; i++) {
  433                         rxp = &sc->fxp_desc.rx_list[i];
  434                         if (rxp->rx_mbuf != NULL) {
  435                                 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
  436                                     BUS_DMASYNC_POSTREAD);
  437                                 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
  438                                 m_freem(rxp->rx_mbuf);
  439                         }
  440                         bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
  441                 }
  442                 bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
  443                 for (i = 0; i < FXP_NTXCB; i++) {
  444                         txp = &sc->fxp_desc.tx_list[i];
  445                         if (txp->tx_mbuf != NULL) {
  446                                 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
  447                                     BUS_DMASYNC_POSTWRITE);
  448                                 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
  449                                 m_freem(txp->tx_mbuf);
  450                         }
  451                         bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
  452                 }
  453                 bus_dma_tag_destroy(sc->fxp_mtag);
  454         }
  455         if (sc->fxp_stag)
  456                 bus_dma_tag_destroy(sc->fxp_stag);
  457         if (sc->cbl_tag)
  458                 bus_dma_tag_destroy(sc->cbl_tag);
  459         if (sc->mcs_tag)
  460                 bus_dma_tag_destroy(sc->mcs_tag);
  461 #endif
  462         if (sc->intrhand)
  463                 bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
  464         sc->intrhand = 0;
  465         bus_generic_detach(sc->dev);
  466         if (sc->miibus)
  467                 device_delete_child(sc->dev, sc->miibus);
  468         if (sc->mem_res)
  469                 bus_release_resource(dev, SYS_RES_MEMORY,
  470                     rman_get_rid(sc->mem_res), sc->mem_res);
  471         sc->mem_res = 0;
  472         if (sc->irq_res)
  473                 bus_release_resource(dev, SYS_RES_IRQ,
  474                     rman_get_rid(sc->irq_res), sc->irq_res);
  475         sc->irq_res = 0;
  476         return;
  477 }
  478 
  479 /*
  480  * Change media according to request.
  481  */
  482 static int
  483 ate_ifmedia_upd(struct ifnet *ifp)
  484 {
  485         struct ate_softc *sc = ifp->if_softc;
  486         struct mii_data *mii;
  487 
  488         mii = device_get_softc(sc->miibus);
  489         ATE_LOCK(sc);
  490         mii_mediachg(mii);
  491         ATE_UNLOCK(sc);
  492         return (0);
  493 }
  494 
  495 /*
  496  * Notify the world which media we're using.
  497  */
  498 static void
  499 ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
  500 {
  501         struct ate_softc *sc = ifp->if_softc;
  502         struct mii_data *mii;
  503 
  504         mii = device_get_softc(sc->miibus);
  505         ATE_LOCK(sc);
  506         mii_pollstat(mii);
  507         ifmr->ifm_active = mii->mii_media_active;
  508         ifmr->ifm_status = mii->mii_media_status;
  509         ATE_UNLOCK(sc);
  510 }
  511 
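      /*
       * Annotation: despite its name, this routine only updates the
       * ETH_CFG speed and duplex bits to match the media the MII layer
       * resolved; the MIB statistics themselves are gathered in ate_tick().
       */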
  512 static void
  513 ate_stat_update(struct ate_softc *sc, int active)
  514 {
  515         /*
  516          * The speed and full/half-duplex state need to be reflected
  517          * in the ETH_CFG register.
  518          */
  519         if (IFM_SUBTYPE(active) == IFM_10_T)
  520                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_SPD);
  521         else
  522                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_SPD);
  523         if (active & IFM_FDX)
  524                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_FD);
  525         else
  526                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_FD);
  527 }
  528 
  529 static void
  530 ate_tick(void *xsc)
  531 {
  532         struct ate_softc *sc = xsc;
  533         struct ifnet *ifp = sc->ifp;
  534         struct mii_data *mii;
  535         int active;
  536         uint32_t c;
  537 
  538         /*
  539          * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and, when that
  540          * bit is clear, asks the MII whether there's a link.  Not sure if we
  541          * should do the same thing here or not.
  542          */
  543         ATE_ASSERT_LOCKED(sc);
  544         if (sc->miibus != NULL) {
  545                 mii = device_get_softc(sc->miibus);
  546                 active = mii->mii_media_active;
  547                 mii_tick(mii);
  548                 if (mii->mii_media_status & IFM_ACTIVE &&
  549                      active != mii->mii_media_active)
  550                         ate_stat_update(sc, mii->mii_media_active);
  551         }
  552 
  553         /*
  554          * Update the stats as best we can.  When we're done, clear
  555          * the status counters and start over.  We're supposed to read these
  556          * registers often enough that they won't overflow.  Hopefully
  557          * once a second is often enough.  Some don't map well to
  558          * the dot3Stats mib, so for those we just count them as general
  559          * errors.  Stats for iframes, ibytes, oframes and obytes are
  560          * collected elsewhere.  These registers zero on a read to prevent
  561          * races.  For all the collision stats, also update the collision
  562          * stats for the interface.
  563          */
  564         sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
  565         sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
  566         c = RD4(sc, ETH_SCOL);
  567         ifp->if_collisions += c;
  568         sc->mibdata.dot3StatsSingleCollisionFrames += c;
  569         c = RD4(sc, ETH_MCOL);
  570         sc->mibdata.dot3StatsMultipleCollisionFrames += c;
  571         ifp->if_collisions += c;
  572         sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
  573         sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
  574         c = RD4(sc, ETH_LCOL);
  575         sc->mibdata.dot3StatsLateCollisions += c;
  576         ifp->if_collisions += c;
  577         c = RD4(sc, ETH_ECOL);
  578         sc->mibdata.dot3StatsExcessiveCollisions += c;
  579         ifp->if_collisions += c;
  580         sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
  581         sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
  582         sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);
  583         /*
  584          * not sure where to lump these, so count them against the errors
  585          * for the interface.
  586          */
  587         sc->ifp->if_oerrors += RD4(sc, ETH_TUE);
  588         sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
  589             RD4(sc, ETH_USF);
  590 
  591         /*
  592          * Schedule another timeout one second from now.
  593          */
  594         callout_reset(&sc->tick_ch, hz, ate_tick, sc);
  595 }
  596 
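      /*
       * Annotation: the specific-address registers hold the MAC in
       * little-endian byte order.  For example, 12:34:56:78:9a:bc is
       * written as SA1L = 0x78563412 and SA1H = 0x0000bc9a, which matches
       * the unpacking done in ate_get_mac() below.
       */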
  597 static void
  598 ate_set_mac(struct ate_softc *sc, u_char *eaddr)
  599 {
  600         WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
  601             (eaddr[1] << 8) | eaddr[0]);
  602         WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
  603 }
  604 
  605 static int
  606 ate_get_mac(struct ate_softc *sc, u_char *eaddr)
  607 {
  608         bus_size_t sa_low_reg[] = { ETH_SA1L, ETH_SA2L, ETH_SA3L, ETH_SA4L };
  609         bus_size_t sa_high_reg[] = { ETH_SA1H, ETH_SA2H, ETH_SA3H, ETH_SA4H };
  610         uint32_t low, high;
  611         int i;
  612 
  613         /*
  614          * The boot loader sets up the MAC with an address, if one is set in
  615          * the loader.  Grab one MAC address from the SA[1-4][HL] registers.
  616          */
  617         for (i = 0; i < 4; i++) {
  618                 low = RD4(sc, sa_low_reg[i]);
  619                 high = RD4(sc, sa_high_reg[i]);
  620                 if ((low | (high & 0xffff)) != 0) {
  621                         eaddr[0] = low & 0xff;
  622                         eaddr[1] = (low >> 8) & 0xff;
  623                         eaddr[2] = (low >> 16) & 0xff;
  624                         eaddr[3] = (low >> 24) & 0xff;
  625                         eaddr[4] = high & 0xff;
  626                         eaddr[5] = (high >> 8) & 0xff;
  627                         return (0);
  628                 }
  629         }
  630         return (ENXIO);
  631 }
  632 
  633 static void
  634 ate_intr(void *xsc)
  635 {
  636         struct ate_softc *sc = xsc;
  637         struct ifnet *ifp = sc->ifp;
  638         int status;
  639         int i;
  640         void *bp;
  641         struct mbuf *mb;
  642         uint32_t rx_stat;
  643 
  644         status = RD4(sc, ETH_ISR);
  645         if (status == 0)
  646                 return;
  647         if (status & ETH_ISR_RCOM) {
  648                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  649                     BUS_DMASYNC_POSTREAD);
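                      /*
                       * Annotation: the EMAC sets ETH_CPU_OWNER in a
                       * descriptor's address word once it has written a
                       * frame into that buffer; after copying the frame out
                       * we clear the bit to hand the descriptor back.
                       */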
  650                 while (sc->rx_descs[sc->rx_buf_ptr].addr & ETH_CPU_OWNER) {
  651                         i = sc->rx_buf_ptr;
  652                         sc->rx_buf_ptr = (i + 1) % ATE_MAX_RX_BUFFERS;
  653                         bp = sc->rx_buf[i];
  654                         rx_stat = sc->rx_descs[i].status;
  655                         if ((rx_stat & ETH_LEN_MASK) == 0) {
  656                                 printf("ignoring bogus 0 len packet\n");
  657                                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  658                                     BUS_DMASYNC_PREWRITE);
  659                                 sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
  660                                 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  661                                     BUS_DMASYNC_POSTWRITE);
  662                                 continue;
  663                         }
  664                         /* Flush memory for mbuf so we don't get stale bytes */
  665                         bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
  666                             BUS_DMASYNC_POSTREAD);
  667                         WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));
  668 
  669                         /*
  670                          * The length returned by the device includes the
  671                          * ethernet CRC calculation for the packet, but
  672                          * ifnet drivers are supposed to discard it.
  673                          */
  674                         mb = m_devget(sc->rx_buf[i],
  675                             (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN,
  676                             ETHER_ALIGN, ifp, NULL);
  677                         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  678                             BUS_DMASYNC_PREWRITE);
  679                         sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
  680                         bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
  681                             BUS_DMASYNC_POSTWRITE);
  682                         bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
  683                             BUS_DMASYNC_PREREAD);
  684                         if (mb != NULL) {
  685                                 ifp->if_ipackets++;
  686                                 (*ifp->if_input)(ifp, mb);
  687                         }
  688                         
  689                 }
  690         }
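              /*
               * Annotation: TX-complete handling for the two ping-pong
               * transmit slots.  Slot 0 always holds the frame that just
               * finished; a frame still pending in slot 1 is either freed
               * as well (transmitter idle) or moved down to slot 0 with
               * txcur left at 1, and atestart_locked() then queues more.
               */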
  691         if (status & ETH_ISR_TCOM) {
  692                 ATE_LOCK(sc);
  693                 /* XXX TSR register should be cleared */
  694                 if (sc->sent_mbuf[0]) {
  695                         bus_dmamap_sync(sc->mtag, sc->tx_map[0],
  696                             BUS_DMASYNC_POSTWRITE);
  697                         m_freem(sc->sent_mbuf[0]);
  698                         ifp->if_opackets++;
  699                         sc->sent_mbuf[0] = NULL;
  700                 }
  701                 if (sc->sent_mbuf[1]) {
  702                         if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
  703                                 bus_dmamap_sync(sc->mtag, sc->tx_map[1],
  704                                     BUS_DMASYNC_POSTWRITE);
  705                                 m_freem(sc->sent_mbuf[1]);
  706                                 ifp->if_opackets++;
  707                                 sc->txcur = 0;
  708                                 sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
  709                         } else {
  710                                 sc->sent_mbuf[0] = sc->sent_mbuf[1];
  711                                 sc->sent_mbuf[1] = NULL;
  712                                 sc->txcur = 1;
  713                         }
  714                 } else {
  715                         sc->sent_mbuf[0] = NULL;
  716                         sc->txcur = 0;
  717                 }
  718                 /*
  719                  * We're no longer busy, so clear the busy flag and call the
  720                  * start routine to xmit more packets.
  721                  */
  722                 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  723                 atestart_locked(sc->ifp);
  724                 ATE_UNLOCK(sc);
  725         }
  726         if (status & ETH_ISR_RBNA) {
  727                 printf("RBNA workaround\n");
  728                 /* Workaround Errata #11 */
  729                 WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) &~ ETH_CTL_RE);
  730                 WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_RE);
  731         }
  732 }
  733 
  734 /*
  735  * Reset and initialize the chip
  736  */
  737 static void
  738 ateinit_locked(void *xsc)
  739 {
  740         struct ate_softc *sc = xsc;
  741         struct ifnet *ifp = sc->ifp;
  742         struct mii_data *mii;
  743 
  744         ATE_ASSERT_LOCKED(sc);
  745 
  746         /*
  747          * XXX TODO(3)
  748          * we need to turn on the EMAC clock in the pmc.  With the
  749          * default boot loader, this is already turned on.  However, we
  750          * need to think about how best to turn it on/off as the interface
  751          * is brought up/down, as well as dealing with the mii bus...
  752          *
  753          * We also need to multiplex the pins correctly.
  754          */
  755 
  756         /*
  757          * There are two different ways that the mii bus is connected
  758          * to this chip.  Select the right one based on how the hardware
  759          * was configured at attach time (the use_rmii flag).
  760          */
  761         if (sc->use_rmii)
  762                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_RMII);
  763         else
  764                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_RMII);
  765 
  766         /*
  767          * Turn on the multicast hash, and write 0's to it.
  768          */
  769         WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_MTI);
  770         WR4(sc, ETH_HSH, 0);
  771         WR4(sc, ETH_HSL, 0);
  772 
  773         WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
  774         WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);
  775 
  776         /*
  777          * The boot loader fills in the MAC address.  If that's not the case,
  778          * then we should set SA1L and SA1H here to the appropriate value.
  779          * Note: the byte order is big endian, not little endian, so we would
  780          * have some swapping to do, if we need it at all (which I don't think we do).
  781          */
  782         ate_setmcast(sc);
  783 
  784         /* enable big packets */
  785         WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
  786 
  787         /*
  788          * Set 'running' flag, and clear output active flag
  789          * and attempt to start the output
  790          */
  791         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  792         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  793 
  794         mii = device_get_softc(sc->miibus);
  795         mii_pollstat(mii);
  796         ate_stat_update(sc, mii->mii_media_active);
  797         atestart_locked(ifp);
  798 
  799         callout_reset(&sc->tick_ch, hz, ate_tick, sc);
  800 }
  801 
  802 /*
  803  * Dequeue packets and transmit them.
  804  */
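      /*
       * Annotation: the loop below uses the BNQ bit in ETH_TSR to decide
       * whether the EMAC can accept another frame; when it is clear both
       * ping-pong slots are busy, so IFF_DRV_OACTIVE is set and the TCOM
       * interrupt handler restarts transmission later.
       */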
  805 static void
  806 atestart_locked(struct ifnet *ifp)
  807 {
  808         struct ate_softc *sc = ifp->if_softc;
  809         struct mbuf *m, *mdefrag;
  810         bus_dma_segment_t segs[1];
  811         int nseg, e;
  812 
  813         ATE_ASSERT_LOCKED(sc);
  814         if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
  815                 return;
  816 
  817         while (sc->txcur < ATE_MAX_TX_BUFFERS) {
  818                 /*
  819                  * check to see if there's room to put another packet into the
  820                  * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
  821                  * packets.  We use OACTIVE to indicate "we can stuff more into
  822                  * our buffers (clear) or not (set)."
  823                  */
  824                 if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
  825                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
  826                         return;
  827                 }
  828                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
  829                 if (m == 0) {
  830                         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  831                         return;
  832                 }
  833                 e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
  834                     segs, &nseg, 0);
  835                 if (e == EFBIG) {
  836                         mdefrag = m_defrag(m, M_DONTWAIT);
  837                         if (mdefrag == NULL) {
  838                                 IFQ_DRV_PREPEND(&ifp->if_snd, m);
  839                                 return;
  840                         }
  841                         m = mdefrag;
  842                         e = bus_dmamap_load_mbuf_sg(sc->mtag,
  843                             sc->tx_map[sc->txcur], m, segs, &nseg, 0);
  844                 }
  845                 if (e != 0) {
  846                         m_freem(m);
  847                         continue;
  848                 }
  849                 bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
  850                     BUS_DMASYNC_PREWRITE);
  851 
  852                 /*
  853                  * tell the hardware to xmit the packet.
  854                  */
  855                 WR4(sc, ETH_TAR, segs[0].ds_addr);
  856                 WR4(sc, ETH_TCR, segs[0].ds_len);
  857         
  858                 /*
  859                  * Tap off here if there is a bpf listener.
  860                  */
  861                 BPF_MTAP(ifp, m);
  862 
  863                 sc->sent_mbuf[sc->txcur] = m;
  864                 sc->txcur++;
  865         }
  866 }
  867 
  868 static void
  869 ateinit(void *xsc)
  870 {
  871         struct ate_softc *sc = xsc;
  872         ATE_LOCK(sc);
  873         ateinit_locked(sc);
  874         ATE_UNLOCK(sc);
  875 }
  876 
  877 static void
  878 atestart(struct ifnet *ifp)
  879 {
  880         struct ate_softc *sc = ifp->if_softc;
  881         ATE_LOCK(sc);
  882         atestart_locked(ifp);
  883         ATE_UNLOCK(sc);
  884 }
  885 
  886 /*
  887  * Turn off interrupts, and stop the nic.  Can be called with sc->ifp NULL
  888  * so be careful.
  889  */
  890 static void
  891 atestop(struct ate_softc *sc)
  892 {
  893         struct ifnet *ifp = sc->ifp;
  894 
  895         if (ifp) {
  896                 ifp->if_timer = 0;
  897                 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  898         }
  899 
  900         callout_stop(&sc->tick_ch);
  901 
  902         /*
  903          * Enable only the parts of the MAC that are always needed (like
  904          * the MII bus).  This turns off the RE and TE bits, which will remain
  905          * off until ateinit() is called to turn them on.  With RE and TE
  906          * turned off, there's no DMA to worry about after this write.
  907          */
  908         WR4(sc, ETH_CTL, ETH_CTL_MPE);
  909 
  910         /*
  911          * Turn off all the configured options and revert to defaults.
  912          */
  913         WR4(sc, ETH_CFG, ETH_CFG_CLK_32);
  914 
  915         /*
  916          * Turn off all the interrupts, and ack any pending ones by reading
  917          * the ISR.
  918          */
  919         WR4(sc, ETH_IDR, 0xffffffff);
  920         RD4(sc, ETH_ISR);
  921 
  922         /*
  923          * Clear out the Transmit and Receive Status registers of any
  924          * errors they may be reporting.
  925          */
  926         WR4(sc, ETH_TSR, 0xffffffff);
  927         WR4(sc, ETH_RSR, 0xffffffff);
  928 
  929         /*
  930          * XXX TODO(8)
  931          * need to worry about the busdma resources?  Yes, I think we need
  932          * to sync and unload them.  We may also need to release the mbufs
  933          * that are associated with RX and TX operations.
  934          */
  935 
  936         /*
  937          * XXX we should power down the EMAC if it isn't in use, after
  938          * putting it into loopback mode.  This saves about 400uA according
  939          * to the datasheet.
  940          */
  941 }
  942 
  943 static int
  944 ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
  945 {
  946         struct ate_softc *sc = ifp->if_softc;
  947         struct mii_data *mii;
  948         struct ifreq *ifr = (struct ifreq *)data;       
  949         int mask, error = 0;
  950 
  951         switch (cmd) {
  952         case SIOCSIFFLAGS:
  953                 ATE_LOCK(sc);
  954                 if ((ifp->if_flags & IFF_UP) == 0 &&
  955                     ifp->if_drv_flags & IFF_DRV_RUNNING) {
  956                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
  957                         atestop(sc);
  958                 } else {
  959                         /* reinitialize card on any parameter change */
  960                         ateinit_locked(sc);
  961                 }
  962                 ATE_UNLOCK(sc);
  963                 break;
  964 
  965         case SIOCADDMULTI:
  966         case SIOCDELMULTI:
  967                 /* update multicast filter list. */
  968                 ATE_LOCK(sc);
  969                 ate_setmcast(sc);
  970                 ATE_UNLOCK(sc);
  971                 error = 0;
  972                 break;
  973 
  974         case SIOCSIFMEDIA:
  975         case SIOCGIFMEDIA:
  976                 mii = device_get_softc(sc->miibus);
  977                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
  978                 break;
  979         case SIOCSIFCAP:
  980                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
  981                 if (mask & IFCAP_VLAN_MTU) {
  982                         ATE_LOCK(sc);
  983                         if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
  984                                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
  985                                 ifp->if_capenable |= IFCAP_VLAN_MTU;
  986                         } else {
  987                                 WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
  988                                 ifp->if_capenable &= ~IFCAP_VLAN_MTU;
  989                         }
  990                         ATE_UNLOCK(sc);
  991                 }
                      break;
  992         default:
  993                 error = ether_ioctl(ifp, cmd, data);
  994                 break;
  995         }
  996         return (error);
  997 }
  998 
  999 static void
 1000 ate_child_detached(device_t dev, device_t child)
 1001 {
 1002         struct ate_softc *sc;
 1003 
 1004         sc = device_get_softc(dev);
 1005         if (child == sc->miibus)
 1006                 sc->miibus = NULL;
 1007 }
 1008 
 1009 /*
 1010  * MII bus support routines.
 1011  */
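      /*
       * Annotation: PHY accesses go through the ETH_MAN (PHY maintenance)
       * register.  ETH_MAN_REG_RD/WR encode the PHY and register numbers;
       * the code then polls the IDLE bit in ETH_SR until the MDIO shift
       * finishes and, for reads, pulls the result back out of ETH_MAN.
       */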
 1012 static int
 1013 ate_miibus_readreg(device_t dev, int phy, int reg)
 1014 {
 1015         struct ate_softc *sc;
 1016         int val;
 1017 
 1018         /*
 1019          * XXX if we implement aggressive power savings, then we need
 1020          * XXX to make sure that the clock to the emac is on here
 1021          */
 1022 
 1023         sc = device_get_softc(dev);
 1024         sc = device_get_softc(dev);
 1025         DELAY(1);       /* Hangs without this delay; it's really ~30.5us at the moment */
 1025         WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
 1026         while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
 1027                 continue;
 1028         val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;
 1029 
 1030         return (val);
 1031 }
 1032 
 1033 static void
 1034 ate_miibus_writereg(device_t dev, int phy, int reg, int data)
 1035 {
 1036         struct ate_softc *sc;
 1037         
 1038         /*
 1039          * XXX if we implement aggressive power savings, then we need
 1040          * XXX to make sure that the clock to the emac is on here
 1041          */
 1042 
 1043         sc = device_get_softc(dev);
 1044         WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
 1045         while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
 1046                 continue;
 1047         return;
 1048 }
 1049 
 1050 static device_method_t ate_methods[] = {
 1051         /* Device interface */
 1052         DEVMETHOD(device_probe,         ate_probe),
 1053         DEVMETHOD(device_attach,        ate_attach),
 1054         DEVMETHOD(device_detach,        ate_detach),
 1055 
 1056         /* Bus interface */
 1057         DEVMETHOD(bus_child_detached,   ate_child_detached),
 1058 
 1059         /* MII interface */
 1060         DEVMETHOD(miibus_readreg,       ate_miibus_readreg),
 1061         DEVMETHOD(miibus_writereg,      ate_miibus_writereg),
 1062 
 1063         { 0, 0 }
 1064 };
 1065 
 1066 static driver_t ate_driver = {
 1067         "ate",
 1068         ate_methods,
 1069         sizeof(struct ate_softc),
 1070 };
 1071 
 1072 DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0);
 1073 DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0);
 1074 MODULE_DEPEND(ate, miibus, 1, 1, 1);
 1075 MODULE_DEPEND(ate, ether, 1, 1, 1);
