FreeBSD/Linux Kernel Cross Reference
sys/dev/qbus/if_qe.c


    1 /*      $NetBSD: if_qe.c,v 1.81 2019/05/28 07:41:49 msaitoh Exp $ */
    2 /*
    3  * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   17  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   19  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   20  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   21  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   23  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   24  */
   25 
   26 /*
   27  * Driver for DEQNA/DELQA ethernet cards.
   28  * Things still left to do:
   29  *      Handle ubaresets; they do not work at all right now.
   30  *      Fix ALLMULTI reception. But someone must tell me how...
   31  *      Collect statistics.
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 __KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.81 2019/05/28 07:41:49 msaitoh Exp $");
   36 
   37 #include "opt_inet.h"
   38 
   39 #include <sys/param.h>
   40 #include <sys/mbuf.h>
   41 #include <sys/socket.h>
   42 #include <sys/device.h>
   43 #include <sys/systm.h>
   44 #include <sys/sockio.h>
   45 
   46 #include <net/if.h>
   47 #include <net/if_ether.h>
   48 #include <net/if_dl.h>
   49 #include <net/bpf.h>
   50 
   51 #include <netinet/in.h>
   52 #include <netinet/if_inarp.h>
   53 
   54 #include <sys/bus.h>
   55 
   56 #include <dev/qbus/ubavar.h>
   57 #include <dev/qbus/if_qereg.h>
   58 
   59 #include "ioconf.h"
   60 
   61 #define RXDESCS 30      /* # of receive descriptors */
   62 #define TXDESCS 60      /* # transmit descs */
   63 
   64 /*
   65  * Structure containing the elements that must be in DMA-safe memory.
   66  */
   67 struct qe_cdata {
   68         struct qe_ring  qc_recv[RXDESCS+1];     /* Receive descriptors */
   69         struct qe_ring  qc_xmit[TXDESCS+1];     /* Transmit descriptors */
   70         uint8_t qc_setup[128];          /* Setup packet layout */
   71 };
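      /*
       * The extra descriptor at the end of each array is a chain entry
       * that points back to the first one, turning each list into a
       * ring; this is set up once in qeattach().
       */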
   72 
   73 struct  qe_softc {
   74         device_t        sc_dev;         /* Configuration common part    */
   75         struct uba_softc *sc_uh;        /* our parent */
   76         struct evcnt    sc_intrcnt;     /* Interrupt counting           */
   77         struct ethercom sc_ec;          /* Ethernet common part         */
   78 #define sc_if   sc_ec.ec_if             /* network-visible interface    */
   79         bus_space_tag_t sc_iot;
   80         bus_addr_t      sc_ioh;
   81         bus_dma_tag_t   sc_dmat;
   82         struct qe_cdata *sc_qedata;     /* Descriptor struct            */
   83         struct qe_cdata *sc_pqedata;    /* Unibus address of above      */
   84         struct mbuf*    sc_txmbuf[TXDESCS];
   85         struct mbuf*    sc_rxmbuf[RXDESCS];
   86         bus_dmamap_t    sc_xmtmap[TXDESCS];
   87         bus_dmamap_t    sc_rcvmap[RXDESCS];
   88         bus_dmamap_t    sc_nulldmamap;  /* ethernet padding buffer      */
   89         struct ubinfo   sc_ui;
   90         int             sc_intvec;      /* Interrupt vector             */
   91         int             sc_nexttx;      /* Next free tx descriptor      */
   92         int             sc_inq;         /* # of tx descriptors in use   */
   93         int             sc_lastack;     /* Last acked tx descriptor     */
   94         int             sc_nextrx;      /* Next rx descriptor to check  */
   95         int             sc_setup;       /* Setup packet in queue        */
   96 };
   97 
   98 static  int     qematch(device_t, cfdata_t, void *);
   99 static  void    qeattach(device_t, device_t, void *);
  100 static  void    qeinit(struct qe_softc *);
  101 static  void    qestart(struct ifnet *);
  102 static  void    qeintr(void *);
  103 static  int     qeioctl(struct ifnet *, u_long, void *);
  104 static  int     qe_add_rxbuf(struct qe_softc *, int);
  105 static  void    qe_setup(struct qe_softc *);
  106 static  void    qetimeout(struct ifnet *);
  107 
  108 CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
  109     qematch, qeattach, NULL, NULL);
  110 
  111 #define QE_WCSR(csr, val) \
  112         bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
  113 #define QE_RCSR(csr) \
  114         bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
  115 
  116 #define LOWORD(x)       ((int)(x) & 0xffff)
  117 #define HIWORD(x)       (((int)(x) >> 16) & 0x3f)
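      /*
       * LOWORD/HIWORD split a 22-bit Qbus DMA address into the 16-bit
       * low word and 6-bit high word that the descriptor format expects;
       * e.g. 0x3ffffe gives LOWORD 0xfffe and HIWORD 0x3f.  Flag bits
       * such as QE_VALID are or'ed into the high word.
       */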
  118 
  119 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
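      /*
       * Frames shorter than the ethernet minimum of ETHER_MIN_LEN (64)
       * bytes must be padded.  The chip appends the ETHER_CRC_LEN (4)
       * byte CRC itself, so the data portion is padded to 64 - 4 = 60
       * bytes.
       */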
  120 
  121 /*
  122  * Check for a present DEQNA. Done by sending a fake setup packet
  123  * and waiting for an interrupt.
  124  */
  125 int
  126 qematch(device_t parent, cfdata_t cf, void *aux)
  127 {
  128         struct  qe_softc ssc;
  129         struct  qe_softc *sc = &ssc;
  130         struct  uba_attach_args *ua = aux;
  131         struct  uba_softc *uh = device_private(parent);
  132         struct ubinfo ui;
  133 
  134 #define PROBESIZE       4096
  135         struct qe_ring *ring;
  136         struct  qe_ring *rp;
  137         int error, match;
  138 
  139         ring = malloc(PROBESIZE, M_TEMP, M_WAITOK | M_ZERO);
  140         memset(sc, 0, sizeof(*sc));
  141         sc->sc_iot = ua->ua_iot;
  142         sc->sc_ioh = ua->ua_ioh;
  143         sc->sc_dmat = ua->ua_dmat;
  144 
  145         uh->uh_lastiv -= 4;     /* reserve a fresh interrupt vector */
  146         QE_WCSR(QE_CSR_CSR, QE_RESET);
  147         QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);
  148 
  149         /*
  150          * Map the ring area. Actually this is done only to be able to
  151          * send and receive an internal packet; some junk is looped back
  152          * so that the DEQNA has a reason to interrupt.
  153          */
  154         ui.ui_size = PROBESIZE;
  155         ui.ui_vaddr = (void *)&ring[0];
  156         if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
  157                 match = 0;
  158                 goto out0;
  159         }
  160 
  161         /*
  162          * Init simple "fake" receive and transmit descriptors that
  163          * point to some unused area. Send a fake setup packet.
  164          */
  165         rp = (void *)ui.ui_baddr;
  166         ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
  167         ring[0].qe_addr_lo = LOWORD(&rp[4]);
  168         ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
  169         ring[0].qe_buf_len = -64;
  170 
  171         ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
  172         ring[2].qe_addr_lo = LOWORD(&rp[4]);
  173         ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
  174         ring[2].qe_buf_len = -(1500/2);
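              /*
               * Buffer lengths are handed to the chip as negative word
               * (16-bit) counts: -64 covers the 128-byte setup packet and
               * -(1500/2) a full-sized receive buffer.
               */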
  175 
  176         QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
  177         DELAY(1000);
  178 
  179         /*
  180          * Start the interface and wait for the packet.
  181          */
  182         QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT);
  183         QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
  184         QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
  185         QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
  186         QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
  187         DELAY(10000);
  188 
  189         match = 1;
  190 
  191         /*
  192          * All done with the bus resources.
  193          */
  194         ubfree(uh, &ui);
  195 out0:   free(ring, M_TEMP);
  196         return match;
  197 }
  198 
  199 /*
  200  * Interface exists: make it available by filling in the network
  201  * interface record.  The system will initialize the interface when
  202  * it is ready to accept packets.
  203  */
  204 void
  205 qeattach(device_t parent, device_t self, void *aux)
  206 {
  207         struct uba_attach_args *ua = aux;
  208         struct qe_softc *sc = device_private(self);
  209         struct ifnet *ifp = &sc->sc_if;
  210         struct qe_ring *rp;
  211         uint8_t enaddr[ETHER_ADDR_LEN];
  212         int i, error;
  213         char *nullbuf;
  214 
  215         sc->sc_dev = self;
  216         sc->sc_uh = device_private(parent);
  217         sc->sc_iot = ua->ua_iot;
  218         sc->sc_ioh = ua->ua_ioh;
  219         sc->sc_dmat = ua->ua_dmat;
  220 
  221         /*
  222          * Allocate DMA-safe memory for the descriptors and the setup packet.
  223          */
  224 
  225         sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
  226         if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
  227                 aprint_error(": unable to ubmemalloc(), error = %d\n", error);
  228                 return;
  229         }
  230         sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
  231         sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;
  232 
  233         /*
  234          * Zero the newly allocated memory.
  235          */
  236         memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
  237         nullbuf = ((char*)sc->sc_qedata) + sizeof(struct qe_cdata);
  238         /*
  239          * Create the transmit descriptor DMA maps.  We take advantage
  240          * of the fact that the Qbus address space is big and allocate
  241          * map registers for all transmit descriptors up front, so that
  242          * we need not do it each time we send a packet.
  243          */
  244         for (i = 0; i < TXDESCS; i++) {
  245                 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
  246                     1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
  247                     &sc->sc_xmtmap[i]))) {
  248                         aprint_error(
  249                             ": unable to create tx DMA map %d, error = %d\n",
  250                             i, error);
  251                         goto fail_4;
  252                 }
  253         }
  254 
  255         /*
  256          * Create receive buffer DMA maps.
  257          */
  258         for (i = 0; i < RXDESCS; i++) {
  259                 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
  260                     MCLBYTES, 0, BUS_DMA_NOWAIT,
  261                     &sc->sc_rcvmap[i]))) {
  262                         aprint_error(
  263                             ": unable to create rx DMA map %d, error = %d\n",
  264                             i, error);
  265                         goto fail_5;
  266                 }
  267         }
  268         /*
  269          * Pre-allocate the receive buffers.
  270          */
  271         for (i = 0; i < RXDESCS; i++) {
  272                 if ((error = qe_add_rxbuf(sc, i)) != 0) {
  273                         aprint_error(
  274                             ": unable to allocate or map rx buffer %d,"
  275                             " error = %d\n", i, error);
  276                         goto fail_6;
  277                 }
  278         }
  279 
  280         if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
  281             ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
  282                 aprint_error(
  283                     ": unable to create pad buffer DMA map, error = %d\n",
  284                     error);
  285                 goto fail_6;
  286         }
  287         if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
  288             nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
  289                 aprint_error(
  290                     ": unable to load pad buffer DMA map, error = %d\n",
  291                     error);
  292                 goto fail_7;
  293         }
  294         bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
  295             BUS_DMASYNC_PREWRITE);
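              /*
               * nullbuf stays all zeroes; qestart() chains it onto short
               * frames so that everything transmitted is at least
               * ETHER_PAD_LEN bytes long.
               */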
  296 
  297         /*
  298          * Create ring loops of the buffer chains.
  299          * This is only done once.
  300          */
  301 
  302         rp = sc->sc_qedata->qc_recv;
  303         rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
  304         rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
  305             QE_VALID | QE_CHAIN;
  306         rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;
  307 
  308         rp = sc->sc_qedata->qc_xmit;
  309         rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
  310         rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
  311             QE_VALID | QE_CHAIN;
  312         rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;
  313 
  314         /*
  315          * Get the vector that was set at match time, and remember it.
  316          */
  317         sc->sc_intvec = sc->sc_uh->uh_lastiv;
  318         QE_WCSR(QE_CSR_CSR, QE_RESET);
  319         DELAY(1000);
  320         QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
  321 
  322         /*
  323          * Read out the ethernet address and report the card type.
  324          */
  325         for (i = 0; i < 6; i++)
  326                 enaddr[i] = QE_RCSR(i * 2) & 0xff;
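              /*
               * The station address PROM is presented in the low byte of
               * the first six word registers.
               */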
  327 
  328         QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
  329         aprint_normal(": %s, hardware address %s\n",
  330                 QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
  331                 ether_sprintf(enaddr));
  332 
  333         QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */
  334 
  335         uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
  336                 sc, &sc->sc_intrcnt);
  337         evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
  338                 device_xname(sc->sc_dev), "intr");
  339 
  340         strcpy(ifp->if_xname, device_xname(sc->sc_dev));
  341         ifp->if_softc = sc;
  342         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  343         ifp->if_start = qestart;
  344         ifp->if_ioctl = qeioctl;
  345         ifp->if_watchdog = qetimeout;
  346         IFQ_SET_READY(&ifp->if_snd);
  347 
  348         /*
  349          * Attach the interface.
  350          */
  351         if_attach(ifp);
  352         ether_ifattach(ifp, enaddr);
  353 
  354         return;
  355 
  356         /*
  357          * Free any resources we've allocated during the failed attach
  358          * attempt.  Do this in reverse order and fall through.
  359          */
  360  fail_7:
  361         bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
  362  fail_6:
  363         for (i = 0; i < RXDESCS; i++) {
  364                 if (sc->sc_rxmbuf[i] != NULL) {
  365                         bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
  366                         m_freem(sc->sc_rxmbuf[i]);
  367                 }
  368         }
  369  fail_5:
  370         for (i = 0; i < RXDESCS; i++) {
  371                 if (sc->sc_rcvmap[i] != NULL)
  372                         bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
  373         }
  374  fail_4:
  375         for (i = 0; i < TXDESCS; i++) {
  376                 if (sc->sc_xmtmap[i] != NULL)
  377                         bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
  378         }
  379 }
  380 
  381 /*
  382  * Initialization of interface.
  383  */
  384 void
  385 qeinit(struct qe_softc *sc)
  386 {
  387         struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
  388         struct qe_cdata *qc = sc->sc_qedata;
  389         int i;
  390 
  391 
  392         /*
  393          * Reset the interface.
  394          */
  395         QE_WCSR(QE_CSR_CSR, QE_RESET);
  396         DELAY(1000);
  397         QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
  398         QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);
  399 
  400         sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
  401         /*
  402          * Release and init transmit descriptors.
  403          */
  404         for (i = 0; i < TXDESCS; i++) {
  405                 if (sc->sc_txmbuf[i]) {
  406                         bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
  407                         m_freem(sc->sc_txmbuf[i]);
  408                         sc->sc_txmbuf[i] = 0;
  409                 }
  410                 qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
  411                 qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
  412         }
  413 
  414 
  415         /*
  416          * Init receive descriptors.
  417          */
  418         for (i = 0; i < RXDESCS; i++)
  419                 qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
  420         sc->sc_nextrx = 0;
  421 
  422         /*
  423          * Write the descriptor addresses to the device.
  424          * Receiving packets will be enabled in the interrupt routine.
  425          */
  426         QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT);
  427         QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
  428         QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));
  429 
  430         ifp->if_flags |= IFF_RUNNING;
  431         ifp->if_flags &= ~IFF_OACTIVE;
  432 
  433         /*
  434          * Send a setup frame.
  435          * This will start the transmit machinery as well.
  436          */
  437         qe_setup(sc);
  438 
  439 }
  440 
  441 /*
  442  * Start output on interface.
  443  */
  444 void
  445 qestart(struct ifnet *ifp)
  446 {
  447         struct qe_softc *sc = ifp->if_softc;
  448         struct qe_cdata *qc = sc->sc_qedata;
  449         paddr_t buffer;
  450         struct mbuf *m, *m0;
  451         int idx, len, s, i, totlen, buflen;
  452         short orword, csr;
  453 
  454         if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
  455                 return;
  456 
  457         s = splnet();
  458         while (sc->sc_inq < (TXDESCS - 1)) {
  459 
  460                 if (sc->sc_setup) {
  461                         qe_setup(sc);
  462                         continue;
  463                 }
  464                 idx = sc->sc_nexttx;
  465                 IFQ_POLL(&ifp->if_snd, m);
  466                 if (m == 0)
  467                         goto out;
  468                 /*
  469                  * Count the number of mbufs in the chain.
  470                  * We always DMA directly from the mbufs, which is why
  471                  * the transmit ring is really big.
  472                  */
  473                 for (m0 = m, i = 0; m0; m0 = m0->m_next)
  474                         if (m0->m_len)
  475                                 i++;
  476                 if (m->m_pkthdr.len < ETHER_PAD_LEN) {
  477                         buflen = ETHER_PAD_LEN;
  478                         i++;
  479                 } else
  480                         buflen = m->m_pkthdr.len;
  481                 if (i >= TXDESCS)
  482                         panic("qestart");
  483 
  484                 if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
  485                         ifp->if_flags |= IFF_OACTIVE;
  486                         goto out;
  487                 }
  488 
  489                 IFQ_DEQUEUE(&ifp->if_snd, m);
  490 
  491                 bpf_mtap(ifp, m, BPF_D_OUT);
  492                 /*
  493                  * m now points to an mbuf chain that can be loaded.
  494                  * Loop over it and set up the descriptors.
  495                  */
  496                 totlen = 0;
  497                 for (m0 = m; ; m0 = m0->m_next) {
  498                         if (m0) {
  499                                 if (m0->m_len == 0)
  500                                         continue;
  501                                 bus_dmamap_load(sc->sc_dmat,
  502                                     sc->sc_xmtmap[idx], mtod(m0, void *),
  503                                     m0->m_len, 0, 0);
  504                                 buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
  505                                 len = m0->m_len;
  506                         } else if (totlen < ETHER_PAD_LEN) {
  507                                 buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
  508                                 len = ETHER_PAD_LEN - totlen;
  509                         } else {
  510                                 break;
  511                         }
  512 
  513                         totlen += len;
  514                         /* Word alignment calc */
  515                         orword = 0;
  516                         if (totlen == buflen) {
  517                                 orword |= QE_EOMSG;
  518                                 sc->sc_txmbuf[idx] = m;
  519                         }
  520                         if ((buffer & 1) || (len & 1))
  521                                 len += 2;
  522                         if (buffer & 1)
  523                                 orword |= QE_ODDBEGIN;
  524                         if ((buffer + len) & 1)
  525                                 orword |= QE_ODDEND;
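                              /*
                               * The chip transfers 16-bit words only, so odd
                               * start/end addresses are rounded out and flagged
                               * with QE_ODDBEGIN/QE_ODDEND.
                               */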
  526                         qc->qc_xmit[idx].qe_buf_len = -(len/2);
  527                         qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
  528                         qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
  529                         qc->qc_xmit[idx].qe_flag =
  530                             qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
  531                         qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
  532                         if (++idx == TXDESCS)
  533                                 idx = 0;
  534                         sc->sc_inq++;
  535                         if (m0 == NULL)
  536                                 break;
  537                 }
  538 #ifdef DIAGNOSTIC
  539                 if (totlen != buflen)
  540                         panic("qestart: len fault");
  541 #endif
  542 
  543                 /*
  544                  * Kick off the transmit logic, if it is stopped.
  545                  */
  546                 csr = QE_RCSR(QE_CSR_CSR);
  547                 if (csr & QE_XL_INVALID) {
  548                         QE_WCSR(QE_CSR_XMTL,
  549                             LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
  550                         QE_WCSR(QE_CSR_XMTH,
  551                             HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
  552                 }
  553                 sc->sc_nexttx = idx;
  554         }
  555         if (sc->sc_inq == (TXDESCS - 1))
  556                 ifp->if_flags |= IFF_OACTIVE;
  557 
  558 out:    if (sc->sc_inq)
  559                 ifp->if_timer = 5; /* If transmit logic dies */
  560         splx(s);
  561 }
  562 
  563 static void
  564 qeintr(void *arg)
  565 {
  566         struct qe_softc *sc = arg;
  567         struct qe_cdata *qc = sc->sc_qedata;
  568         struct ifnet *ifp = &sc->sc_if;
  569         struct mbuf *m;
  570         int csr, status1, status2, len;
  571 
  572         csr = QE_RCSR(QE_CSR_CSR);
  573 
  574         QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
  575             QE_RCV_INT | QE_ILOOP);
  576 
  577         if (csr & QE_RCV_INT)
  578                 while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
  579                         status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
  580                         status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;
  581 
  582                         m = sc->sc_rxmbuf[sc->sc_nextrx];
  583                         len = ((status1 & QE_RBL_HI) |
  584                             (status2 & QE_RBL_LO)) + 60;
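                              /*
                               * The RBL fields hold the received length less
                               * 60 bytes, hence the correction above.
                               */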
  585                         qe_add_rxbuf(sc, sc->sc_nextrx);
  586                         m_set_rcvif(m, ifp);
  587                         m->m_pkthdr.len = m->m_len = len;
  588                         if (++sc->sc_nextrx == RXDESCS)
  589                                 sc->sc_nextrx = 0;
  590                         if ((status1 & QE_ESETUP) == 0)
  591                                 if_percpuq_enqueue(ifp->if_percpuq, m);
  592                         else
  593                                 m_freem(m);
  594                 }
  595 
  596         if (csr & (QE_XMIT_INT | QE_XL_INVALID)) {
  597                 while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
  598                         int idx = sc->sc_lastack;
  599 
  600                         sc->sc_inq--;
  601                         if (++sc->sc_lastack == TXDESCS)
  602                                 sc->sc_lastack = 0;
  603 
  604                         /* XXX collect statistics */
  605                         qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
  606                         qc->qc_xmit[idx].qe_status1 =
  607                             qc->qc_xmit[idx].qe_flag = QE_NOTYET;
  608 
  609                         if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
  610                                 continue;
  611                         if (sc->sc_txmbuf[idx] == NULL ||
  612                             sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
  613                                 bus_dmamap_unload(sc->sc_dmat,
  614                                     sc->sc_xmtmap[idx]);
  615                         if (sc->sc_txmbuf[idx]) {
  616                                 m_freem(sc->sc_txmbuf[idx]);
  617                                 sc->sc_txmbuf[idx] = NULL;
  618                         }
  619                 }
  620                 ifp->if_timer = 0;
  621                 ifp->if_flags &= ~IFF_OACTIVE;
  622                 qestart(ifp); /* Put more packets in the queue */
  623         }
  624         /*
  625          * How can the receive list become invalid???
  626          * Verified that it happens anyway.
  627          */
  628         if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
  629             (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
  630                 QE_WCSR(QE_CSR_RCLL,
  631                     LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
  632                 QE_WCSR(QE_CSR_RCLH,
  633                     HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
  634         }
  635 }
  636 
  637 /*
  638  * Process an ioctl request.
  639  */
  640 int
  641 qeioctl(struct ifnet *ifp, u_long cmd, void *data)
  642 {
  643         struct qe_softc *sc = ifp->if_softc;
  644         struct ifaddr *ifa = (struct ifaddr *)data;
  645         int s = splnet(), error = 0;
  646 
  647         switch (cmd) {
  648 
  649         case SIOCINITIFADDR:
  650                 ifp->if_flags |= IFF_UP;
  651                 switch (ifa->ifa_addr->sa_family) {
  652 #ifdef INET
  653                 case AF_INET:
  654                         qeinit(sc);
  655                         arp_ifinit(ifp, ifa);
  656                         break;
  657 #endif
  658                 }
  659                 break;
  660 
  661         case SIOCSIFFLAGS:
  662                 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
  663                         break;
  664                 /* XXX re-use ether_ioctl() */
  665                 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
  666                 case IFF_RUNNING:
  667                         /*
  668                          * If the interface is marked down and it is running,
  669                          * stop it by disabling the receive mechanism.
  670                          */
  671                         QE_WCSR(QE_CSR_CSR,
  672                             QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
  673                         ifp->if_flags &= ~IFF_RUNNING;
  674                         break;
  675                 case IFF_UP:
  676                         /*
  677                          * If the interface is marked up and it is stopped, then
  678                          * start it.
  679                          */
  680                         qeinit(sc);
  681                         break;
  682                 case IFF_UP | IFF_RUNNING:
  683                         /*
  684                          * Send a new setup packet to match any changes,
  685                          * such as IFF_PROMISC.
  686                          */
  687                         qe_setup(sc);
  688                         break;
  689                 case 0:
  690                         break;
  691                 }
  692                 break;
  693 
  694         case SIOCADDMULTI:
  695         case SIOCDELMULTI:
  696                 /*
  697                  * Update our multicast list.
  698                  */
  699                 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
  700                         /*
  701                          * Multicast list has changed; set the hardware filter
  702                          * accordingly.
  703                          */
  704                         if (ifp->if_flags & IFF_RUNNING)
  705                                 qe_setup(sc);
  706                         error = 0;
  707                 }
  708                 break;
  709 
  710         default:
  711                 error = ether_ioctl(ifp, cmd, data);
  712         }
  713         splx(s);
  714         return error;
  715 }
  716 
  717 /*
  718  * Add a receive buffer to the indicated descriptor.
  719  */
  720 int
  721 qe_add_rxbuf(struct qe_softc *sc, int i)
  722 {
  723         struct mbuf *m;
  724         struct qe_ring *rp;
  725         vaddr_t addr;
  726         int error;
  727 
  728         MGETHDR(m, M_DONTWAIT, MT_DATA);
  729         if (m == NULL)
  730                 return ENOBUFS;
  731 
  732         MCLGET(m, M_DONTWAIT);
  733         if ((m->m_flags & M_EXT) == 0) {
  734                 m_freem(m);
  735                 return ENOBUFS;
  736         }
  737 
  738         if (sc->sc_rxmbuf[i] != NULL)
  739                 bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
  740 
  741         error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
  742             m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
  743         if (error)
  744                 panic("%s: can't load rx DMA map %d, error = %d",
  745                     device_xname(sc->sc_dev), i, error);
  746         sc->sc_rxmbuf[i] = m;
  747 
  748         bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
  749             sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
  750 
  751         /*
  752          * We know that the mbuf cluster is page aligned.  Offset the data
  753          * by 2 so the IP header after the 14-byte ethernet header is longword aligned.
  754          */
  755         m->m_data += 2;
  756         addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
  757         rp = &sc->sc_qedata->qc_recv[i];
  758         rp->qe_flag = rp->qe_status1 = QE_NOTYET;
  759         rp->qe_addr_lo = LOWORD(addr);
  760         rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
  761         rp->qe_buf_len = -(m->m_ext.ext_size - 2) / 2;
  762 
  763         return 0;
  764 }
  765 
  766 /*
  767  * Create a setup packet and put it in the queue for sending.
  768  */
  769 void
  770 qe_setup(struct qe_softc *sc)
  771 {
  772         struct ethercom *ec = &sc->sc_ec;
  773         struct ether_multi *enm;
  774         struct ether_multistep step;
  775         struct qe_cdata *qc = sc->sc_qedata;
  776         struct ifnet *ifp = &sc->sc_if;
  777         uint8_t enaddr[ETHER_ADDR_LEN];
  778         int i, j, k, idx, s;
  779 
  780         s = splnet();
  781         if (sc->sc_inq == (TXDESCS - 1)) {
  782                 sc->sc_setup = 1;
  783                 splx(s);
  784                 return;
  785         }
  786         sc->sc_setup = 0;
  787         /*
  788          * Init the setup packet with valid info.
  789          */
  790         memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
  791         memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
  792         for (i = 0; i < ETHER_ADDR_LEN; i++)
  793                 qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */
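              /*
               * Each address occupies one column of the 128-byte setup
               * packet, one byte per 8-byte row; slots left as 0xff from
               * the memset above continue to match the broadcast address.
               */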
  794 
  795         /*
  796          * Multicast handling. The DEQNA can handle up to 12 direct
  797          * ethernet addresses.
  798          */
  799         j = 3; k = 0;
  800         ifp->if_flags &= ~IFF_ALLMULTI;
  801         ETHER_LOCK(ec);
  802         ETHER_FIRST_MULTI(step, ec, enm);
  803         while (enm != NULL) {
  804                 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
  805                         ifp->if_flags |= IFF_ALLMULTI;
  806                         break;
  807                 }
  808                 for (i = 0; i < ETHER_ADDR_LEN; i++)
  809                         qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
  810                 j++;
  811                 if (j == 8) {
  812                         j = 1; k += 64;
  813                 }
  814                 if (k > 64) {
  815                         ifp->if_flags |= IFF_ALLMULTI;
  816                         break;
  817                 }
  818                 ETHER_NEXT_MULTI(step, enm);
  819         }
  820         ETHER_UNLOCK(ec);
  821         idx = sc->sc_nexttx;
  822         qc->qc_xmit[idx].qe_buf_len = -64;
  823 
  824         /*
  825          * How is the DEQNA put into ALLMULTI mode???
  826          * Until someone tells me, fall back to PROMISC when there are
  827          * more than 12 ethernet addresses.
  828          */
  829         if (ifp->if_flags & IFF_ALLMULTI)
  830                 ifp->if_flags |= IFF_PROMISC;
  831         else if (ifp->if_pcount == 0)
  832                 ifp->if_flags &= ~IFF_PROMISC;
  833         if (ifp->if_flags & IFF_PROMISC)
  834                 qc->qc_xmit[idx].qe_buf_len = -65;
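              /*
               * Judging from the -65 above, a setup packet longer than the
               * normal 64 words is what switches the chip into promiscuous
               * mode.
               */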
  835 
  836         qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
  837         qc->qc_xmit[idx].qe_addr_hi =
  838             HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
  839         qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
  840         qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;
  841 
  842         if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
  843                 QE_WCSR(QE_CSR_XMTL,
  844                     LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
  845                 QE_WCSR(QE_CSR_XMTH,
  846                     HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
  847         }
  848 
  849         sc->sc_inq++;
  850         if (++sc->sc_nexttx == TXDESCS)
  851                 sc->sc_nexttx = 0;
  852         splx(s);
  853 }
  854 
  855 /*
  856  * Check for dead transmit logic. Not uncommon.
  857  */
  858 void
  859 qetimeout(struct ifnet *ifp)
  860 {
  861         struct qe_softc *sc = ifp->if_softc;
  862 
  863         if (sc->sc_inq == 0)
  864                 return;
  865 
  866         aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
  867         /*
  868          * Do a reset of the interface, to get it going again.
  869          * Would it work to just restart the transmit logic?
  870          */
  871         qeinit(sc);
  872 }
