FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/mtd8xx.c


/*      $OpenBSD: mtd8xx.c,v 1.35 2022/01/09 05:42:38 jsg Exp $ */

/*
 * Copyright (c) 2003 Oleg Safiullin <form@pdp11.org.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>

#include <dev/ic/mtd8xxreg.h>
#include <dev/ic/mtd8xxvar.h>


static int mtd_ifmedia_upd(struct ifnet *);
static void mtd_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static u_int32_t mtd_mii_command(struct mtd_softc *, int, int, int);
static int mtd_miibus_readreg(struct device *, int, int);
static void mtd_miibus_writereg(struct device *, int, int, int);
static void mtd_miibus_statchg(struct device *);
static void mtd_setmulti(struct mtd_softc *);

static int mtd_encap(struct mtd_softc *, struct mbuf *, u_int32_t *);
static int mtd_list_rx_init(struct mtd_softc *);
static void mtd_list_tx_init(struct mtd_softc *);
static int mtd_newbuf(struct mtd_softc *, int, struct mbuf *);

static void mtd_reset(struct mtd_softc *sc);
static int mtd_ioctl(struct ifnet *, u_long, caddr_t);
static void mtd_init(struct ifnet *);
static void mtd_start(struct ifnet *);
static void mtd_stop(struct ifnet *);
static void mtd_watchdog(struct ifnet *);

static int mtd_rxeof(struct mtd_softc *);
static int mtd_rx_resync(struct mtd_softc *);
static void mtd_txeof(struct mtd_softc *);


void
mtd_attach(struct mtd_softc *sc)
{
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        u_int32_t enaddr[2];
        int i;

        /* Reset the adapter. */
        mtd_reset(sc);

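        /*
         * Descriptor list setup follows the usual four-step bus_dma
         * pattern: allocate DMA-safe memory for the RX/TX descriptor
         * lists, map it into kernel virtual address space, create a
         * DMA map, and load the map so the chip-visible physical
         * address is known.
         */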
        if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mtd_list_data),
            PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
                printf(": can't alloc list mem\n");
                return;
        }
        if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
            sizeof(struct mtd_list_data), &sc->sc_listkva,
            BUS_DMA_NOWAIT) != 0) {
                printf(": can't map list mem\n");
                return;
        }
        if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mtd_list_data), 1,
            sizeof(struct mtd_list_data), 0, BUS_DMA_NOWAIT,
            &sc->sc_listmap) != 0) {
                printf(": can't alloc list map\n");
                return;
        }
        if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
            sizeof(struct mtd_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
                printf(": can't load list map\n");
                return;
        }
        sc->mtd_ldata = (struct mtd_list_data *)sc->sc_listkva;

        for (i = 0; i < MTD_RX_LIST_CNT; i++) {
                if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
                    0, BUS_DMA_NOWAIT,
                    &sc->mtd_cdata.mtd_rx_chain[i].sd_map) != 0) {
                        printf(": can't create rx map\n");
                        return;
                }
        }
        if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
            BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
                printf(": can't create rx spare map\n");
                return;
        }

        for (i = 0; i < MTD_TX_LIST_CNT; i++) {
                if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
                    MTD_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
                    &sc->mtd_cdata.mtd_tx_chain[i].sd_map) != 0) {
                        printf(": can't create tx map\n");
                        return;
                }
        }
        if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, MTD_TX_LIST_CNT - 5,
            MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
                printf(": can't create tx spare map\n");
                return;
        }

        /* Get station address. */
        enaddr[0] = letoh32(CSR_READ_4(MTD_PAR0));
        enaddr[1] = letoh32(CSR_READ_4(MTD_PAR4));
        bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
        printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

        /* Initialize interface */
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = mtd_ioctl;
        ifp->if_start = mtd_start;
        ifp->if_watchdog = mtd_watchdog;
        bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

        ifp->if_capabilities = IFCAP_VLAN_MTU;

        /*
         * Initialize our media structures and probe the MII.
         */
        sc->sc_mii.mii_ifp = ifp;
        sc->sc_mii.mii_readreg = mtd_miibus_readreg;
        sc->sc_mii.mii_writereg = mtd_miibus_writereg;
        sc->sc_mii.mii_statchg = mtd_miibus_statchg;
        ifmedia_init(&sc->sc_mii.mii_media, 0, mtd_ifmedia_upd,
            mtd_ifmedia_sts);
        mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
            MII_OFFSET_ANY, 0);
        if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
                ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE, 0,
                    NULL);
                ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_NONE);
        } else
                ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

        /*
         * Attach us everywhere
         */
        if_attach(ifp);
        ether_ifattach(ifp);
}


static int
mtd_ifmedia_upd(struct ifnet *ifp)
{
        struct mtd_softc *sc = ifp->if_softc;

        return (mii_mediachg(&sc->sc_mii));
}


static void
mtd_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct mtd_softc *sc = ifp->if_softc;

        mii_pollstat(&sc->sc_mii);
        ifmr->ifm_active = sc->sc_mii.mii_media_active;
        ifmr->ifm_status = sc->sc_mii.mii_media_status;
}


static u_int32_t
mtd_mii_command(struct mtd_softc *sc, int opcode, int phy, int reg)
{
        u_int32_t miir, mask, data;
        int i;

        miir = (CSR_READ_4(MTD_MIIMGT) & ~MIIMGT_MASK) | MIIMGT_WRITE |
            MIIMGT_MDO;

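        /*
         * Clock out the 32-bit preamble with MDO held high to
         * synchronize the PHY's management interface.
         */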
        for (i = 0; i < 32; i++) {
                miir &= ~MIIMGT_MDC;
                CSR_WRITE_4(MTD_MIIMGT, miir);
                miir |= MIIMGT_MDC;
                CSR_WRITE_4(MTD_MIIMGT, miir);
        }

        data = opcode | (phy << 7) | (reg << 2);

        for (mask = 0x8000; mask; mask >>= 1) {
                miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
                if (mask & data)
                        miir |= MIIMGT_MDO;
                CSR_WRITE_4(MTD_MIIMGT, miir);
                miir |= MIIMGT_MDC;
                CSR_WRITE_4(MTD_MIIMGT, miir);
                DELAY(30);

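                /*
                 * For a read, stop driving MDO before the turnaround
                 * bits so the PHY can take over the data line.
                 */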
                if (mask == 0x4 && opcode == MII_OPCODE_RD)
                        miir &= ~MIIMGT_WRITE;
        }
        return (miir);
}


static int
mtd_miibus_readreg(struct device *self, int phy, int reg)
{
        struct mtd_softc *sc = (void *)self;

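        /*
         * The MTD803 evidently exposes its internal PHY's registers
         * directly in CSR space at MTD_PHYCSR, so only PHY 0 is
         * addressable; other chips are reached by bit-banging the
         * MII management interface.
         */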
        if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803)
                return (phy ? 0 : (int)CSR_READ_2(MTD_PHYCSR + (reg << 1)));
        else {
                u_int32_t miir, mask, data;

                miir = mtd_mii_command(sc, MII_OPCODE_RD, phy, reg);
                for (mask = 0x8000, data = 0; mask; mask >>= 1) {
                        miir &= ~MIIMGT_MDC;
                        CSR_WRITE_4(MTD_MIIMGT, miir);
                        miir = CSR_READ_4(MTD_MIIMGT);
                        if (miir & MIIMGT_MDI)
                                data |= mask;
                        miir |= MIIMGT_MDC;
                        CSR_WRITE_4(MTD_MIIMGT, miir);
                        DELAY(30);
                }
                miir &= ~MIIMGT_MDC;
                CSR_WRITE_4(MTD_MIIMGT, miir);

                return ((int)data);
        }
}


static void
mtd_miibus_writereg(struct device *self, int phy, int reg, int val)
{
        struct mtd_softc *sc = (void *)self;

        if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD803) {
                if (!phy)
                        CSR_WRITE_2(MTD_PHYCSR + (reg << 1), val);
        } else {
                u_int32_t miir, mask;

                miir = mtd_mii_command(sc, MII_OPCODE_WR, phy, reg);
                for (mask = 0x8000; mask; mask >>= 1) {
                        miir &= ~(MIIMGT_MDC | MIIMGT_MDO);
                        if (mask & (u_int32_t)val)
                                miir |= MIIMGT_MDO;
                        CSR_WRITE_4(MTD_MIIMGT, miir);
                        miir |= MIIMGT_MDC;
                        CSR_WRITE_4(MTD_MIIMGT, miir);
                        DELAY(1);
                }
                miir &= ~MIIMGT_MDC;
                CSR_WRITE_4(MTD_MIIMGT, miir);
        }
}


static void
mtd_miibus_statchg(struct device *self)
{
        /* NOTHING */
}


static void
mtd_setmulti(struct mtd_softc *sc)
{
        struct arpcom *ac = &sc->sc_arpcom;
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        u_int32_t rxfilt, crc, hash[2] = { 0, 0 };
        struct ether_multistep step;
        struct ether_multi *enm;
        int mcnt = 0;

        if (ac->ac_multirangecnt > 0)
                ifp->if_flags |= IFF_ALLMULTI;

        rxfilt = CSR_READ_4(MTD_TCRRCR) & ~RCR_AM;
        if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
                rxfilt |= RCR_AM;
                CSR_WRITE_4(MTD_TCRRCR, rxfilt);
                CSR_WRITE_4(MTD_MAR0, 0xffffffff);
                CSR_WRITE_4(MTD_MAR4, 0xffffffff);
                return;
        }

        /* First, zot all the existing hash bits. */
        CSR_WRITE_4(MTD_MAR0, 0);
        CSR_WRITE_4(MTD_MAR4, 0);

        /* Now program new ones. */
        ETHER_FIRST_MULTI(step, ac, enm);
        while (enm != NULL) {
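                /*
                 * The filter is a 64-bit hash table split across the
                 * MAR0/MAR4 registers.  The upper 6 bits of the
                 * big-endian CRC of each address select one of the
                 * 64 bits: bit 5 picks the register, bits 0-4 the
                 * bit within it.
                 */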
                crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
                hash[crc >> 5] |= 1 << (crc & 0x1f);
                ++mcnt;
                ETHER_NEXT_MULTI(step, enm);
        }

        if (mcnt)
                rxfilt |= RCR_AM;
        CSR_WRITE_4(MTD_MAR0, hash[0]);
        CSR_WRITE_4(MTD_MAR4, hash[1]);
        CSR_WRITE_4(MTD_TCRRCR, rxfilt);
}


/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
mtd_encap(struct mtd_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
        struct mtd_tx_desc *f = NULL;
        int frag, cur, cnt = 0, i, total_len = 0;
        bus_dmamap_t map;

        /*
         * Start packing the mbufs in this chain into
         * the fragment pointers. Stop when we run out
         * of fragments or hit the end of the mbuf chain.
         */
        map = sc->sc_tx_sparemap;

        if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
            m_head, BUS_DMA_NOWAIT) != 0)
                return (1);

        cur = frag = *txidx;

        for (i = 0; i < map->dm_nsegs; i++) {
                if ((MTD_TX_LIST_CNT -
                    (sc->mtd_cdata.mtd_tx_cnt + cnt)) < 5) {
                        bus_dmamap_unload(sc->sc_dmat, map);
                        return (1);
                }

                f = &sc->mtd_ldata->mtd_tx_list[frag];
                f->td_tcw = htole32(map->dm_segs[i].ds_len);
                total_len += map->dm_segs[i].ds_len;
                if (cnt == 0) {
                        f->td_tsw = 0;
                        f->td_tcw |= htole32(TCW_FD | TCW_CRC | TCW_PAD);
                } else
                        f->td_tsw = htole32(TSW_OWN);
                f->td_buf = htole32(map->dm_segs[i].ds_addr);
                cur = frag;
                frag = (frag + 1) % MTD_TX_LIST_CNT;
                cnt++;
        }

        sc->mtd_cdata.mtd_tx_cnt += cnt;
        sc->mtd_cdata.mtd_tx_chain[cur].sd_mbuf = m_head;
        sc->sc_tx_sparemap = sc->mtd_cdata.mtd_tx_chain[cur].sd_map;
        sc->mtd_cdata.mtd_tx_chain[cur].sd_map = map;
        sc->mtd_ldata->mtd_tx_list[cur].td_tcw |= htole32(TCW_LD | TCW_IC);
        if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891)
                sc->mtd_ldata->mtd_tx_list[cur].td_tcw |=
                    htole32(TCW_EIC | TCW_RTLC);

        bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
            BUS_DMASYNC_PREWRITE);

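        /*
         * Hand the first descriptor to the chip only after all later
         * fragments have been set up, so the chip never sees a
         * partially built chain.
         */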
        sc->mtd_ldata->mtd_tx_list[*txidx].td_tsw = htole32(TSW_OWN);
        sc->mtd_ldata->mtd_tx_list[*txidx].td_tcw |=
            htole32(total_len << TCW_PKTS_SHIFT);

        bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
            offsetof(struct mtd_list_data, mtd_tx_list[0]),
            sizeof(struct mtd_tx_desc) * MTD_TX_LIST_CNT,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        *txidx = frag;

        return (0);
}


/*
 * Initialize the transmit descriptors.
 */
static void
mtd_list_tx_init(struct mtd_softc *sc)
{
        struct mtd_chain_data *cd;
        struct mtd_list_data *ld;
        int i;

        cd = &sc->mtd_cdata;
        ld = sc->mtd_ldata;
        for (i = 0; i < MTD_TX_LIST_CNT; i++) {
                cd->mtd_tx_chain[i].sd_mbuf = NULL;
                ld->mtd_tx_list[i].td_tsw = 0;
                ld->mtd_tx_list[i].td_tcw = 0;
                ld->mtd_tx_list[i].td_buf = 0;
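                /*
                 * Chain each descriptor to the physical address of
                 * the next one, wrapping at the end to form a ring.
                 */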
                ld->mtd_tx_list[i].td_next = htole32(
                    sc->sc_listmap->dm_segs[0].ds_addr +
                    offsetof(struct mtd_list_data,
                    mtd_tx_list[(i + 1) % MTD_TX_LIST_CNT]));
        }

        cd->mtd_tx_prod = cd->mtd_tx_cons = cd->mtd_tx_cnt = 0;
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
mtd_list_rx_init(struct mtd_softc *sc)
{
        struct mtd_list_data *ld;
        int i;

        ld = sc->mtd_ldata;

        for (i = 0; i < MTD_RX_LIST_CNT; i++) {
                if (mtd_newbuf(sc, i, NULL))
                        return (1);
                ld->mtd_rx_list[i].rd_next = htole32(
                    sc->sc_listmap->dm_segs[0].ds_addr +
                    offsetof(struct mtd_list_data,
                    mtd_rx_list[(i + 1) % MTD_RX_LIST_CNT])
                );
        }

        sc->mtd_cdata.mtd_rx_prod = 0;

        return (0);
}


/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
mtd_newbuf(struct mtd_softc *sc, int i, struct mbuf *m)
{
        struct mbuf *m_new = NULL;
        struct mtd_rx_desc *c;
        bus_dmamap_t map;

        c = &sc->mtd_ldata->mtd_rx_list[i];

        if (m == NULL) {
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        return (1);

                MCLGET(m_new, M_DONTWAIT);
                if (!(m_new->m_flags & M_EXT)) {
                        m_freem(m_new);
                        return (1);
                }
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
                if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
                    mtod(m_new, caddr_t), MCLBYTES, NULL,
                    BUS_DMA_NOWAIT) != 0) {
                        m_freem(m_new);
                        return (1);
                }
                map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;
                sc->mtd_cdata.mtd_rx_chain[i].sd_map = sc->sc_rx_sparemap;
                sc->sc_rx_sparemap = map;
        } else {
                m_new = m;
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
                m_new->m_data = m_new->m_ext.ext_buf;
        }

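        /*
         * Trim sizeof(u_int64_t) bytes from the front of the cluster
         * and offset the DMA address programmed into rd_buf below by
         * the same amount, keeping m_data and the chip's buffer
         * pointer in sync; presumably this satisfies the chip's
         * receive buffer alignment requirement.
         */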
        m_adj(m_new, sizeof(u_int64_t));

        bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map, 0,
            sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
            BUS_DMASYNC_PREREAD);

        sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = m_new;
        c->rd_buf = htole32(
            sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_segs[0].ds_addr +
            sizeof(u_int64_t));
        c->rd_rcw = htole32(ETHER_MAX_DIX_LEN);
        c->rd_rsr = htole32(RSR_OWN);

        bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
            offsetof(struct mtd_list_data, mtd_rx_list[i]),
            sizeof(struct mtd_rx_desc),
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);
}


static void
mtd_reset(struct mtd_softc *sc)
{
        int i;

        /* Set software reset bit */
        CSR_WRITE_4(MTD_BCR, BCR_SWR);

        /*
         * Wait until software reset completed.
         */
        for (i = 0; i < MTD_TIMEOUT; ++i) {
                DELAY(10);
                if (!(CSR_READ_4(MTD_BCR) & BCR_SWR)) {
                        /*
                         * Wait a little while for the chip to get
                         * its brains in order.
                         */
                        DELAY(1000);
                        return;
                }
        }

        /* Reset timed out. */
        printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
}


static int
mtd_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct mtd_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        int s, error = 0;

        s = splnet();

        switch (command) {
        case SIOCSIFADDR:
                ifp->if_flags |= IFF_UP;
                mtd_init(ifp);
                break;

        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP)
                        mtd_init(ifp);
                else {
                        if (ifp->if_flags & IFF_RUNNING)
                                mtd_stop(ifp);
                }
                error = 0;
                break;

        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
                break;
        default:
                error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
        }

        if (error == ENETRESET) {
                if (ifp->if_flags & IFF_RUNNING)
                        mtd_setmulti(sc);
                error = 0;
        }

        splx(s);
        return (error);
}


static void
mtd_init(struct ifnet *ifp)
{
        struct mtd_softc *sc = ifp->if_softc;
        int s;

        s = splnet();

        /*
         * Cancel pending I/O and free all RX/TX buffers.
         */
        mtd_stop(ifp);

        /*
         * Reset the chip to a known state.
         */
        mtd_reset(sc);

        /*
         * Set cache alignment and burst length.
         */
        CSR_WRITE_4(MTD_BCR, BCR_PBL8);
        CSR_WRITE_4(MTD_TCRRCR, TCR_TFTSF | RCR_RBLEN | RCR_RPBL512);
        if (sc->sc_devid == PCI_PRODUCT_MYSON_MTD891) {
                CSR_SETBIT(MTD_BCR, BCR_PROG);
                CSR_SETBIT(MTD_TCRRCR, TCR_ENHANCED);
        }

        if (ifp->if_flags & IFF_PROMISC)
                CSR_SETBIT(MTD_TCRRCR, RCR_PROM);
        else
                CSR_CLRBIT(MTD_TCRRCR, RCR_PROM);

        if (ifp->if_flags & IFF_BROADCAST)
                CSR_SETBIT(MTD_TCRRCR, RCR_AB);
        else
                CSR_CLRBIT(MTD_TCRRCR, RCR_AB);

        mtd_setmulti(sc);

        if (mtd_list_rx_init(sc)) {
                printf("%s: can't allocate memory for rx buffers\n",
                    sc->sc_dev.dv_xname);
                splx(s);
                return;
        }
        mtd_list_tx_init(sc);

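        /* Point the chip at the physical addresses of the two rings. */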
        CSR_WRITE_4(MTD_RXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
            offsetof(struct mtd_list_data, mtd_rx_list[0]));
        CSR_WRITE_4(MTD_TXLBA, sc->sc_listmap->dm_segs[0].ds_addr +
            offsetof(struct mtd_list_data, mtd_tx_list[0]));

        /*
         * Enable interrupts.
         */
        CSR_WRITE_4(MTD_IMR, IMR_INTRS);
        CSR_WRITE_4(MTD_ISR, 0xffffffff);

        /* Enable receiver and transmitter */
        CSR_SETBIT(MTD_TCRRCR, TCR_TE | RCR_RE);
        CSR_WRITE_4(MTD_RXPDR, 0xffffffff);

        ifp->if_flags |= IFF_RUNNING;
        ifq_clr_oactive(&ifp->if_snd);
        splx(s);
}


/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
mtd_start(struct ifnet *ifp)
{
        struct mtd_softc *sc = ifp->if_softc;
        struct mbuf *m_head = NULL;
        int idx;

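        /*
         * Only one batch of frames is ever outstanding; if descriptors
         * are still in flight, mark the queue active and wait for
         * mtd_txeof() to reclaim them.
         */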
        if (sc->mtd_cdata.mtd_tx_cnt) {
                ifq_set_oactive(&ifp->if_snd);
                return;
        }

        idx = sc->mtd_cdata.mtd_tx_prod;
        while (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf == NULL) {
                m_head = ifq_dequeue(&ifp->if_snd);
                if (m_head == NULL)
                        break;

                if (mtd_encap(sc, m_head, &idx)) {
                        ifq_set_oactive(&ifp->if_snd);
                        break;
                }

                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
#if NBPFILTER > 0
                if (ifp->if_bpf != NULL)
                        bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
        }

        if (idx == sc->mtd_cdata.mtd_tx_prod)
                return;

        /* Transmit */
        sc->mtd_cdata.mtd_tx_prod = idx;
        CSR_WRITE_4(MTD_TXPDR, 0xffffffff);

        /*
         * Set a timeout in case the chip goes out to lunch.
         */
        ifp->if_timer = 5;
}


static void
mtd_stop(struct ifnet *ifp)
{
        struct mtd_softc *sc = ifp->if_softc;
        int i;

        ifp->if_timer = 0;
        ifp->if_flags &= ~IFF_RUNNING;
        ifq_clr_oactive(&ifp->if_snd);

        CSR_CLRBIT(MTD_TCRRCR, (RCR_RE | TCR_TE));
        CSR_WRITE_4(MTD_IMR, 0);
        CSR_WRITE_4(MTD_TXLBA, 0);
        CSR_WRITE_4(MTD_RXLBA, 0);

        /*
         * Free data in the RX lists.
         */
        for (i = 0; i < MTD_RX_LIST_CNT; i++) {
                if (sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_nsegs != 0) {
                        bus_dmamap_t map = sc->mtd_cdata.mtd_rx_chain[i].sd_map;

                        bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->sc_dmat, map);
                }
                if (sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf != NULL) {
                        m_freem(sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf);
                        sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;
                }
        }
        bzero(&sc->mtd_ldata->mtd_rx_list, sizeof(sc->mtd_ldata->mtd_rx_list));

        /*
         * Free the TX list buffers.
         */
        for (i = 0; i < MTD_TX_LIST_CNT; i++) {
                if (sc->mtd_cdata.mtd_tx_chain[i].sd_map->dm_nsegs != 0) {
                        bus_dmamap_t map = sc->mtd_cdata.mtd_tx_chain[i].sd_map;

                        bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->sc_dmat, map);
                }
                if (sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf != NULL) {
                        m_freem(sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf);
                        sc->mtd_cdata.mtd_tx_chain[i].sd_mbuf = NULL;
                }
        }

        bzero(&sc->mtd_ldata->mtd_tx_list, sizeof(sc->mtd_ldata->mtd_tx_list));
}


static void
mtd_watchdog(struct ifnet *ifp)
{
        struct mtd_softc *sc = ifp->if_softc;

        ifp->if_oerrors++;
        printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

        mtd_init(ifp);

        if (!ifq_empty(&ifp->if_snd))
                mtd_start(ifp);
}


int
mtd_intr(void *xsc)
{
        struct mtd_softc *sc = xsc;
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        u_int32_t status;
        int claimed = 0;

        /* Suppress unwanted interrupts */
        if (!(ifp->if_flags & IFF_RUNNING)) {
                if (CSR_READ_4(MTD_ISR) & ISR_INTRS)
                        mtd_stop(ifp);
                return (claimed);
        }

        /* Disable interrupts. */
        CSR_WRITE_4(MTD_IMR, 0);

        while ((status = CSR_READ_4(MTD_ISR)) & ISR_INTRS) {
                claimed = 1;

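                /*
                 * Acknowledge the conditions we are about to handle
                 * by writing the status bits back to the ISR.
                 */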
                CSR_WRITE_4(MTD_ISR, status);

                /* RX interrupt. */
                if (status & ISR_RI) {
                        if (mtd_rxeof(sc) == 0)
                                while (mtd_rx_resync(sc))
                                        mtd_rxeof(sc);
                }

                /* RX error interrupt. */
                if (status & (ISR_RXERI | ISR_RBU))
                        ifp->if_ierrors++;

                /* TX interrupt. */
                if (status & (ISR_TI | ISR_ETI | ISR_TBU))
                        mtd_txeof(sc);

                /* Fatal bus error interrupt. */
                if (status & ISR_FBE) {
                        mtd_reset(sc);
                        mtd_start(ifp);
                }
        }

        /* Re-enable interrupts. */
        CSR_WRITE_4(MTD_IMR, IMR_INTRS);

        if (!ifq_empty(&ifp->if_snd))
                mtd_start(ifp);

        return (claimed);
}


/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
mtd_rxeof(struct mtd_softc *sc)
{
        struct mbuf_list ml = MBUF_LIST_INITIALIZER();
        struct mbuf *m;
        struct ifnet *ifp;
        struct mtd_rx_desc *cur_rx;
        int i, total_len = 0, consumed = 0;
        u_int32_t rxstat;

        ifp = &sc->sc_arpcom.ac_if;
        i = sc->mtd_cdata.mtd_rx_prod;

        while (!(sc->mtd_ldata->mtd_rx_list[i].rd_rsr & htole32(RSR_OWN))) {
                struct mbuf *m0 = NULL;

                bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
                    offsetof(struct mtd_list_data, mtd_rx_list[i]),
                    sizeof(struct mtd_rx_desc),
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

                cur_rx = &sc->mtd_ldata->mtd_rx_list[i];
                rxstat = letoh32(cur_rx->rd_rsr);
                m = sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf;
                total_len = RSR_FLNG_GET(rxstat);

                sc->mtd_cdata.mtd_rx_chain[i].sd_mbuf = NULL;

                /*
                 * If an error occurs, update stats, clear the
                 * status word and leave the mbuf cluster in place:
                 * it should simply get re-used next time this descriptor
                 * comes up in the ring.
                 */
                if (rxstat & RSR_RXER) {
                        ifp->if_ierrors++;
                        mtd_newbuf(sc, i, m);
                        if (rxstat & RSR_CRC) {
                                i = (i + 1) % MTD_RX_LIST_CNT;
                                continue;
                        } else {
                                mtd_init(ifp);
                                break;
                        }
                }

                /* No errors; receive the packet. */
                total_len -= ETHER_CRC_LEN;

                bus_dmamap_sync(sc->sc_dmat, sc->mtd_cdata.mtd_rx_chain[i].sd_map,
                    0, sc->mtd_cdata.mtd_rx_chain[i].sd_map->dm_mapsize,
                    BUS_DMASYNC_POSTREAD);

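                /*
                 * Copy the frame into a fresh mbuf chain and hand the
                 * original cluster straight back to the descriptor, so
                 * the ring never runs dry while the stack holds packets.
                 */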
                m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
                mtd_newbuf(sc, i, m);
                i = (i + 1) % MTD_RX_LIST_CNT;
                if (m0 == NULL) {
                        ifp->if_ierrors++;
                        continue;
                }
                m = m0;

                consumed++;
                ml_enqueue(&ml, m);
        }

        if_input(ifp, &ml);

        sc->mtd_cdata.mtd_rx_prod = i;

        return (consumed);
}


/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen sometimes as a result
 * of a "no RX buffer available" condition that happens when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip, however we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 */
static int
mtd_rx_resync(struct mtd_softc *sc)
{
        int i, pos;
        struct mtd_rx_desc *cur_rx;

        pos = sc->mtd_cdata.mtd_rx_prod;

        for (i = 0; i < MTD_RX_LIST_CNT; i++) {
                bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
                    offsetof(struct mtd_list_data, mtd_rx_list[pos]),
                    sizeof(struct mtd_rx_desc),
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

                cur_rx = &sc->mtd_ldata->mtd_rx_list[pos];
                if (!(cur_rx->rd_rsr & htole32(RSR_OWN)))
                        break;
                pos = (pos + 1) % MTD_RX_LIST_CNT;
        }

        /* If the ring really is empty, then just return. */
        if (i == MTD_RX_LIST_CNT)
                return (0);

        /* We've fallen behind the chip: catch it. */
        sc->mtd_cdata.mtd_rx_prod = pos;

        return (EAGAIN);
}


/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
mtd_txeof(struct mtd_softc *sc)
{
        struct mtd_tx_desc *cur_tx = NULL;
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        int idx;

        /* Clear the timeout timer. */
        ifp->if_timer = 0;

        /*
         * Go through our tx list and free mbufs for those
         * frames that have been transmitted.
         */
        idx = sc->mtd_cdata.mtd_tx_cons;
        while (idx != sc->mtd_cdata.mtd_tx_prod) {
                u_int32_t txstat;

                bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
                    offsetof(struct mtd_list_data, mtd_tx_list[idx]),
                    sizeof(struct mtd_tx_desc),
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

                cur_tx = &sc->mtd_ldata->mtd_tx_list[idx];
                txstat = letoh32(cur_tx->td_tsw);

                if (txstat & TSW_OWN || txstat == TSW_UNSENT)
                        break;

                if (!(cur_tx->td_tcw & htole32(TCW_LD))) {
                        sc->mtd_cdata.mtd_tx_cnt--;
                        idx = (idx + 1) % MTD_TX_LIST_CNT;
                        continue;
                }

                if (CSR_READ_4(MTD_TCRRCR) & TCR_ENHANCED)
                        ifp->if_collisions += TSR_NCR_GET(CSR_READ_4(MTD_TSR));
                else {
                        if (txstat & TSW_TXERR) {
                                ifp->if_oerrors++;
                                if (txstat & TSW_EC)
                                        ifp->if_collisions++;
                                if (txstat & TSW_LC)
                                        ifp->if_collisions++;
                        }
                        ifp->if_collisions += TSW_NCR_GET(txstat);
                }

                if (sc->mtd_cdata.mtd_tx_chain[idx].sd_map->dm_nsegs != 0) {
                        bus_dmamap_t map =
                            sc->mtd_cdata.mtd_tx_chain[idx].sd_map;
                        bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->sc_dmat, map);
                }
                if (sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf != NULL) {
                        m_freem(sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf);
                        sc->mtd_cdata.mtd_tx_chain[idx].sd_mbuf = NULL;
                }
                sc->mtd_cdata.mtd_tx_cnt--;
                idx = (idx + 1) % MTD_TX_LIST_CNT;
        }

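        /*
         * If descriptors were reclaimed, record the new consumer
         * index; otherwise, if the head descriptor was left unsent,
         * hand it to the chip, issue a fresh poll demand and re-arm
         * the watchdog.
         */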
        if (cur_tx != NULL) {
                ifq_clr_oactive(&ifp->if_snd);
                sc->mtd_cdata.mtd_tx_cons = idx;
        } else
                if (sc->mtd_ldata->mtd_tx_list[idx].td_tsw ==
                    htole32(TSW_UNSENT)) {
                        sc->mtd_ldata->mtd_tx_list[idx].td_tsw =
                            htole32(TSW_OWN);
                        ifp->if_timer = 5;
                        CSR_WRITE_4(MTD_TXPDR, 0xffffffff);
                }
}

struct cfdriver mtd_cd = {
        NULL, "mtd", DV_IFNET
};
