FreeBSD/Linux Kernel Cross Reference
sys/dev/hme/if_hme.c


    1 /*-
    2  * Copyright (c) 1999 The NetBSD Foundation, Inc.
    3  * Copyright (c) 2001 Thomas Moestl <tmm@FreeBSD.org>.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software contributed to The NetBSD Foundation
    7  * by Paul Kranenburg.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *        This product includes software developed by the NetBSD
   20  *        Foundation, Inc. and its contributors.
   21  * 4. Neither the name of The NetBSD Foundation nor the names of its
   22  *    contributors may be used to endorse or promote products derived
   23  *    from this software without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  *
   37  *      from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
   38  *
   39  * $FreeBSD: releng/5.0/sys/dev/hme/if_hme.c 106937 2002-11-14 23:54:55Z sam $
   40  */
   41 
   42 /*
   43  * HME Ethernet module driver.
   44  *
   45  * The HME is found, e.g., as part of the PCIO PCI multi-function device.
   46  * It supports TX gathering and TX and RX checksum offloading.
   47  * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
   48  * for this offset: mbuf clusters are usually aligned to 2^11 byte boundaries,
   49  * and skipping 2 bytes makes the header that follows the ethernet header land
   50  * on a natural boundary, so this wastes minimal space in the common case.
   51  *
   52  * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
   53  * maximum packet size (this is not verified). Buffers starting on odd
   54  * boundaries must be mapped so that the burst can start on a natural boundary.
   55  *
   56  * Checksumming is not yet supported.
   57  */
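
/*
 * Illustrative sketch (not part of the driver): the offset arithmetic the
 * comment above describes, checkable in userland. With mbuf clusters
 * aligned to 2^11 byte boundaries and a 2 byte skip, the 14 byte ethernet
 * header ends 16 bytes into the cluster, so the header that follows it
 * (e.g. IP) lands on a naturally aligned boundary.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void
rx_offset_example(void)
{
        uintptr_t cluster = 0x10000;    /* 2^11 aligned cluster start */
        uintptr_t data = cluster + 2;   /* chip stores at offset 2 */
        uintptr_t payload = data + 14;  /* past the ethernet header */

        assert(data % 16 == 2);         /* programmable offset modulo 16 */
        assert(payload % 4 == 0);       /* next header naturally aligned */
}
#endif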
   58 
   59 #define HMEDEBUG
   60 #define KTR_HME         KTR_CT2         /* XXX */
   61 
   62 #include <sys/param.h>
   63 #include <sys/systm.h>
   64 #include <sys/bus.h>
   65 #include <sys/endian.h>
   66 #include <sys/kernel.h>
   67 #include <sys/ktr.h>
   68 #include <sys/mbuf.h>
   69 #include <sys/malloc.h>
   70 #include <sys/socket.h>
   71 #include <sys/sockio.h>
   72 
   73 #include <net/bpf.h>
   74 #include <net/ethernet.h>
   75 #include <net/if.h>
   76 #include <net/if_arp.h>
   77 #include <net/if_dl.h>
   78 #include <net/if_media.h>
   79 
   80 #include <dev/mii/mii.h>
   81 #include <dev/mii/miivar.h>
   82 
   83 #include <machine/bus.h>
   84 
   85 #include <hme/if_hmereg.h>
   86 #include <hme/if_hmevar.h>
   87 
   88 static void     hme_start(struct ifnet *);
   89 static void     hme_stop(struct hme_softc *);
   90 static int      hme_ioctl(struct ifnet *, u_long, caddr_t);
   91 static void     hme_tick(void *);
   92 static void     hme_watchdog(struct ifnet *);
   93 #if 0
   94 static void     hme_shutdown(void *);
   95 #endif
   96 static void     hme_init(void *);
   97 static int      hme_add_rxbuf(struct hme_softc *, unsigned int, int);
   98 static int      hme_meminit(struct hme_softc *);
   99 static int      hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
  100     u_int32_t, u_int32_t);
  101 static void     hme_mifinit(struct hme_softc *);
  102 static void     hme_reset(struct hme_softc *);
  103 static void     hme_setladrf(struct hme_softc *, int);
  104 
  105 static int      hme_mediachange(struct ifnet *);
  106 static void     hme_mediastatus(struct ifnet *, struct ifmediareq *);
  107 
  108 static int      hme_load_mbuf(struct hme_softc *, struct mbuf *);
  109 static void     hme_read(struct hme_softc *, int, int);
  110 static void     hme_eint(struct hme_softc *, u_int);
  111 static void     hme_rint(struct hme_softc *);
  112 static void     hme_tint(struct hme_softc *);
  113 
  114 static void     hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
  115 static void     hme_rxdma_callback(void *, bus_dma_segment_t *, int, int);
  116 static void     hme_txdma_callback(void *, bus_dma_segment_t *, int, int);
  117 
  118 devclass_t hme_devclass;
  119 
  120 static int hme_nerr;
  121 
  122 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
  123 MODULE_DEPEND(hme, miibus, 1, 1, 1);
  124 
  125 #define HME_SPC_READ_4(spc, sc, offs) \
  126         bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  127             (sc)->sc_ ## spc ## o + (offs))
  128 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
  129         bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  130             (sc)->sc_ ## spc ## o + (offs), (v))
  131 
  132 #define HME_SEB_READ_4(sc, offs)        HME_SPC_READ_4(seb, (sc), (offs))
  133 #define HME_SEB_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(seb, (sc), (offs), (v))
  134 #define HME_ERX_READ_4(sc, offs)        HME_SPC_READ_4(erx, (sc), (offs))
  135 #define HME_ERX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(erx, (sc), (offs), (v))
  136 #define HME_ETX_READ_4(sc, offs)        HME_SPC_READ_4(etx, (sc), (offs))
  137 #define HME_ETX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(etx, (sc), (offs), (v))
  138 #define HME_MAC_READ_4(sc, offs)        HME_SPC_READ_4(mac, (sc), (offs))
  139 #define HME_MAC_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mac, (sc), (offs), (v))
  140 #define HME_MIF_READ_4(sc, offs)        HME_SPC_READ_4(mif, (sc), (offs))
  141 #define HME_MIF_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mif, (sc), (offs), (v))
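
/*
 * How the token pasting above expands (assuming the sc_seb{t,h,o} style
 * softc members declared in if_hmevar.h): for example,
 *
 *      HME_SEB_READ_4(sc, HME_SEBI_STAT)
 *
 * effectively expands to
 *
 *      bus_space_read_4((sc)->sc_sebt, (sc)->sc_sebh,
 *          (sc)->sc_sebo + (HME_SEBI_STAT))
 */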
  142 
  143 #define HME_MAXERR      5
  144 #define HME_WHINE(dev, ...) do {                                        \
  145         if (hme_nerr++ < HME_MAXERR)                                    \
  146                 device_printf(dev, __VA_ARGS__);                        \
  147         if (hme_nerr == HME_MAXERR) {                                   \
  148         device_printf(dev, "too many errors; not reporting any " \
  149                     "more\n");                                          \
  150         }                                                               \
  151 } while (0)
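
/*
 * Usage sketch: the first HME_MAXERR calls print normally; the last of
 * them also announces that further reports are suppressed, e.g.:
 *
 *      HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 */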
  152 
  153 int
  154 hme_config(struct hme_softc *sc)
  155 {
  156         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  157         struct mii_softc *child;
  158         bus_size_t size;
  159         int error, rdesc, tdesc, i;
  160 
  161         /*
  162          * HME common initialization.
  163          *
  164          * hme_softc fields that must be initialized by the front-end:
  165          *
  166          * the dma bus tag:
  167          *      sc_dmatag
  168          *
  169          * the bus handles, tags and offsets (split for SBus compatibility):
  170          *      sc_seb{t,h,o}   (Shared Ethernet Block registers)
  171          *      sc_erx{t,h,o}   (Receiver Unit registers)
  172          *      sc_etx{t,h,o}   (Transmitter Unit registers)
  173          *      sc_mac{t,h,o}   (MAC registers)
  174          *      sc_mif{t,h,o}   (Management Interface registers)
  175          *
  176          * the maximum bus burst size:
  177          *      sc_burst
  178          *
  179          */
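
        /*
         * An illustrative sketch (not part of this file) of what a
         * hypothetical front-end's attach routine might do before calling
         * hme_config(). The names parent_dmat, bt and bh and the register
         * offsets are examples only; a real front-end derives them from
         * its bus resources.
         */
#if 0
        sc->sc_dmatag = parent_dmat;            /* from the bus front-end */
        sc->sc_sebt = sc->sc_etxt = sc->sc_erxt = bt;
        sc->sc_mact = sc->sc_mift = bt;
        sc->sc_sebh = sc->sc_etxh = sc->sc_erxh = bh;
        sc->sc_mach = sc->sc_mifh = bh;
        sc->sc_sebo = 0x0000;                   /* example offsets */
        sc->sc_etxo = 0x2000;
        sc->sc_erxo = 0x4000;
        sc->sc_maco = 0x6000;
        sc->sc_mifo = 0x7000;
        sc->sc_burst = 64;                      /* max burst size, bytes */
        error = hme_config(sc);
#endif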
  180 
  181         /* Make sure the chip is stopped. */
  182         hme_stop(sc);
  183 
  184         /*
  185          * Allocate DMA capable memory
  186          * Buffer descriptors must be aligned on a 2048 byte boundary;
  187          * take this into account when calculating the size. Note that
  188          * the maximum number of descriptors (256) occupies 2048 bytes,
  189          * so we allocate that much regardless of HME_N*DESC.
  190          */
  191         size = 4096;
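        /*
         * On the arithmetic (assuming the two-word, 8 byte descriptors from
         * if_hmereg.h): a full 256 entry ring occupies 256 * 8 = 2048 bytes,
         * and with the TX and RX rings each starting on a 2048 byte
         * boundary, 2 * 2048 = 4096 bytes always suffice.
         */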
  192 
  193         error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  194             BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
  195             BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
  196         if (error)
  197                 return (error);
  198 
  199         error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
  200             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
  201             1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, &sc->sc_cdmatag);
  202         if (error)
  203                 goto fail_ptag;
  204 
  205         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  206             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  207             HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
  208             &sc->sc_rdmatag);
  209         if (error)
  210                 goto fail_ctag;
  211 
  212         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  213             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  214             HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
  215             &sc->sc_tdmatag);
  216         if (error)
  217                 goto fail_rtag;
  218 
  219         /* Allocate control/TX DMA buffer */
  220         error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
  221             0, &sc->sc_cdmamap);
  222         if (error != 0) {
  223                 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
  224                 goto fail_ttag;
  225         }
  226 
  227         /* Load the buffer */
  228         sc->sc_rb.rb_dmabase = 0;
  229         if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
  230              sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
  231             sc->sc_rb.rb_dmabase == 0) {
  232                 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
  233                     error);
  234                 goto fail_free;
  235         }
  236         CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
  237             sc->sc_rb.rb_dmabase);
  238 
  239         /*
  240          * Prepare the RX descriptors. rdesc serves as marker for the last
  241          * processed descriptor and may be used later on.
  242          */
  243         for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
  244                 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
  245                 error = bus_dmamap_create(sc->sc_rdmatag, 0,
  246                     &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
  247                 if (error != 0)
  248                         goto fail_rxdesc;
  249         }
  250         error = bus_dmamap_create(sc->sc_rdmatag, 0,
  251             &sc->sc_rb.rb_spare_dmamap);
  252         if (error != 0)
  253                 goto fail_rxdesc;
  254         /* Same for the TX descs. */
  255         for (tdesc = 0; tdesc < HME_NTXDESC; tdesc++) {
  256                 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
  257                 sc->sc_rb.rb_txdesc[tdesc].htx_flags = 0;
  258                 error = bus_dmamap_create(sc->sc_tdmatag, 0,
  259                     &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
  260                 if (error != 0)
  261                         goto fail_txdesc;
  262         }
  263 
  264         device_printf(sc->sc_dev, "Ethernet address:");
  265         for (i = 0; i < 6; i++)
  266                 printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);
  267         printf("\n");
  268 
  269         /* Initialize ifnet structure. */
  270         ifp->if_softc = sc;
  271         ifp->if_unit = device_get_unit(sc->sc_dev);
  272         ifp->if_name = "hme";
  273         ifp->if_mtu = ETHERMTU;
  274         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  275         ifp->if_start = hme_start;
  276         ifp->if_ioctl = hme_ioctl;
  277         ifp->if_init = hme_init;
  278         ifp->if_output = ether_output;
  279         ifp->if_watchdog = hme_watchdog;
  280         ifp->if_snd.ifq_maxlen = HME_NTXDESC;
  281 
  282         hme_mifinit(sc);
  283 
  284         if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
  285             hme_mediastatus)) != 0) {
  286                 device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
  287                 goto fail_rxdesc;
  288         }
  289         sc->sc_mii = device_get_softc(sc->sc_miibus);
  290 
  291         /*
  292          * Walk along the list of attached MII devices and
  293          * establish an `MII instance' to `phy number'
  294          * mapping. We'll use this mapping in media change
  295          * requests to determine which phy to use to program
  296          * the MIF configuration register.
  297          */
  298         for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
  299              child = LIST_NEXT(child, mii_list)) {
  300                 /*
  301                  * Note: we support just two PHYs: the built-in
  302                  * internal device and an external on the MII
  303                  * connector.
  304                  */
  305                 if (child->mii_phy > 1 || child->mii_inst > 1) {
  306                         device_printf(sc->sc_dev, "cannot accommodate "
  307                             "MII device %s at phy %d, instance %d\n",
  308                             device_get_name(child->mii_dev),
  309                             child->mii_phy, child->mii_inst);
  310                         continue;
  311                 }
  312 
  313                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  314         }
  315 
  316         /* Attach the interface. */
  317         ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);
  318 
  319         callout_init(&sc->sc_tick_ch, 0);
  320         return (0);
  321 
  322 fail_txdesc:
  323         for (i = 0; i < tdesc; i++) {
  324                 bus_dmamap_destroy(sc->sc_tdmatag,
  325                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  326         }
  327         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  328 fail_rxdesc:
  329         for (i = 0; i < rdesc; i++) {
  330                 bus_dmamap_destroy(sc->sc_rdmatag,
  331                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  332         }
  333         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  334 fail_free:
  335         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  336 fail_ttag:
  337         bus_dma_tag_destroy(sc->sc_tdmatag);
  338 fail_rtag:
  339         bus_dma_tag_destroy(sc->sc_rdmatag);
  340 fail_ctag:
  341         bus_dma_tag_destroy(sc->sc_cdmatag);
  342 fail_ptag:
  343         bus_dma_tag_destroy(sc->sc_pdmatag);
  344         return (error);
  345 }
  346 
  347 static void
  348 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  349 {
  350         struct hme_softc *sc = (struct hme_softc *)xsc;
  351 
  352         if (error != 0)
  353                 return;
  354         KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
  355         sc->sc_rb.rb_dmabase = segs[0].ds_addr;
  356 }
  357 
  358 static void
  359 hme_tick(void *arg)
  360 {
  361         struct hme_softc *sc = arg;
  362         int s;
  363 
  364         s = splnet();
  365         mii_tick(sc->sc_mii);
  366         splx(s);
  367 
  368         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  369 }
  370 
  371 static void
  372 hme_reset(struct hme_softc *sc)
  373 {
  374         int s;
  375 
  376         s = splnet();
  377         hme_init(sc);
  378         splx(s);
  379 }
  380 
  381 static void
  382 hme_stop(struct hme_softc *sc)
  383 {
  384         u_int32_t v;
  385         int n;
  386 
  387         callout_stop(&sc->sc_tick_ch);
  388 
  389         /* Reset transmitter and receiver */
  390         HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
  391             HME_SEB_RESET_ERX);
  392 
  393         for (n = 0; n < 20; n++) {
  394                 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
  395                 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
  396                         return;
  397                 DELAY(20);
  398         }
  399 
  400         device_printf(sc->sc_dev, "hme_stop: reset failed\n");
  401 }
  402 
  403 static void
  404 hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  405 {
  406         bus_addr_t *a = xsc;
  407 
  408         /* XXX: A cluster should not contain more than one segment, correct? */
  409         if (error != 0 || nsegs != 1)
  410                 return;
  411         *a = segs[0].ds_addr;
  412 }
  413 
  414 /*
  415  * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
  416  * ring for subsequent use.
  417  */
  418 static void
  419 hme_discard_rxbuf(struct hme_softc *sc, int ix, int sync)
  420 {
  421 
  422         /*
  423          * Dropped a packet, reinitialize the descriptor and turn the
  424          * ownership back to the hardware.
  425          */
  426         HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
  427             HME_XD_ENCODE_RSIZE(ulmin(HME_BUFSZ,
  428             sc->sc_rb.rb_rxdesc[ix].hrx_len)));
  429         if (sync) {
  430                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  431                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  432         }
  433 }
  434 
  435 static int
  436 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
  437 {
  438         struct hme_rxdesc *rd;
  439         struct mbuf *m;
  440         bus_addr_t ba;
  441         bus_size_t len, offs;
  442         bus_dmamap_t map;
  443         int a, unmap;
  444         char *b;
  445 
  446         rd = &sc->sc_rb.rb_rxdesc[ri];
  447         unmap = rd->hrx_m != NULL;
  448         if (unmap && keepold) {
  449                 /*
  450                  * Reinitialize the descriptor flags, as they may have been
  451                  * altered by the hardware.
  452                  */
  453                 hme_discard_rxbuf(sc, ri, 0);
  454                 return (0);
  455         }
  456         if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
  457                 return (ENOBUFS);
  458         m_clget(m, M_DONTWAIT);
  459         if ((m->m_flags & M_EXT) == 0)
  460                 goto fail_mcl;
  461         len = m->m_ext.ext_size;
  462         b = mtod(m, char *);
  463         /*
  464          * Required alignment boundary. At least 16 is needed, but since
  465          * the mapping must be done in a way that a burst can start on a
  466          * natural boundary we might need to extend this.
  467          */
  468         a = max(0x10, sc->sc_burst);
  469         /*
  470          * Make sure the buffer suitably aligned: we need an offset of
  471          * 2 modulo a. XXX: this ensures at least 16 byte alignment of the
  472          * header adjacent to the ethernet header,  which should be sufficient
  473          * in all cases. Nevertheless, this second-guesses ALIGN().
  474          */
  475         offs = (a - (((uintptr_t)b - 2) & (a - 1))) % a;
  476         len -= offs;
  477         /* Align the buffer on the boundary for mapping. */
  478         b += offs - 2;
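        /*
         * Worked example: with a = 64 and the original b on a 2048 byte
         * cluster boundary (b % 64 == 0), offs = (64 - ((b - 2) & 63)) % 64
         * = 2, so the mapping starts at b + offs - 2 = b (burst aligned)
         * while the chip stores the packet at b + offs, keeping the header
         * after the ethernet header on a natural boundary.
         */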
  479         ba = 0;
  480         if (bus_dmamap_load(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
  481             b, len + 2, hme_rxdma_callback, &ba, 0) != 0 || ba == 0)
  482                 goto fail_mcl;
  483         if (unmap) {
  484                 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
  485                     BUS_DMASYNC_POSTREAD);
  486                 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
  487         }
  488         map = rd->hrx_dmamap;
  489         rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
  490         sc->sc_rb.rb_spare_dmamap = map;
  491         rd->hrx_offs = offs;
  492         rd->hrx_len = len - sc->sc_burst;
  493         bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
  494         HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
  495         /* Leave at least one burst size of grace space at the end. */
  496         HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
  497             HME_XD_ENCODE_RSIZE(ulmin(HME_BUFSZ, rd->hrx_len)));
  498         rd->hrx_m = m;
  499         return (0);
  500 
  501 fail_mcl:
  502         m_freem(m);
  503         return (ENOBUFS);
  504 }
  505 
  506 static int
  507 hme_meminit(struct hme_softc *sc)
  508 {
  509         struct hme_ring *hr = &sc->sc_rb;
  510         struct hme_txdesc *td;
  511         bus_addr_t dma;
  512         caddr_t p;
  513         unsigned int i;
  514         int error;
  515 
  516         p = hr->rb_membase;
  517         dma = hr->rb_dmabase;
  518 
  519         /*
  520          * Allocate transmit descriptors
  521          */
  522         hr->rb_txd = p;
  523         hr->rb_txddma = dma;
  524         p += HME_NTXDESC * HME_XD_SIZE;
  525         dma += HME_NTXDESC * HME_XD_SIZE;
  526         /* We have reserved descriptor space until the next 2048 byte boundary. */
  527         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  528         p = (caddr_t)roundup((u_long)p, 2048);
  529 
  530         /*
  531          * Allocate receive descriptors
  532          */
  533         hr->rb_rxd = p;
  534         hr->rb_rxddma = dma;
  535         p += HME_NRXDESC * HME_XD_SIZE;
  536         dma += HME_NRXDESC * HME_XD_SIZE;
  537         /* Again move forward to the next 2048 byte boundary. */
  538         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  539         p = (caddr_t)roundup((u_long)p, 2048);
  540 
  541         /*
  542          * Initialize transmit buffer descriptors
  543          */
  544         for (i = 0; i < HME_NTXDESC; i++) {
  545                 td = &sc->sc_rb.rb_txdesc[i];
  546                 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
  547                 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
  548                 if (td->htx_m != NULL) {
  549                         m_freem(td->htx_m);
  550                         td->htx_m = NULL;
  551                 }
  552                 if ((td->htx_flags & HTXF_MAPPED) != 0)
  553                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  554                 td->htx_flags = 0;
  555         }
  556 
  557         /*
  558          * Initialize receive buffer descriptors
  559          */
  560         for (i = 0; i < HME_NRXDESC; i++) {
  561                 error = hme_add_rxbuf(sc, i, 1);
  562                 if (error != 0)
  563                         return (error);
  564         }
  565 
  566         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  567             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  568 
  569         hr->rb_tdhead = hr->rb_tdtail = 0;
  570         hr->rb_td_nbusy = 0;
  571         hr->rb_rdtail = 0;
  572         CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
  573             hr->rb_txddma);
  574         CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
  575             hr->rb_rxddma);
  576         CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
  577             *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
  578         CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
  579             *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
  580         return (0);
  581 }
  582 
  583 static int
  584 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
  585     u_int32_t clr, u_int32_t set)
  586 {
  587         int i = 0;
  588 
  589         val &= ~clr;
  590         val |= set;
  591         HME_MAC_WRITE_4(sc, reg, val);
  592         if (clr == 0 && set == 0)
  593                 return (1);     /* just write, no bits to wait for */
  594         do {
  595                 DELAY(100);
  596                 i++;
  597                 val = HME_MAC_READ_4(sc, reg);
  598                 if (i > 40) {
  599                         /* After 3.5ms, we should have been done. */
  600                         device_printf(sc->sc_dev, "timeout while writing to "
  601                             "MAC configuration register\n");
  602                         return (0);
  603                 }
  604         } while ((val & clr) != 0 && (val & set) != set);
  605         return (1);
  606 }
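
/*
 * Usage sketch (mirroring hme_setladrf() below): callers clear an enable
 * bit and wait for the hardware to acknowledge, reprogram the register,
 * and finally flip the bit back on:
 *
 *      if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, v, HME_MAC_RXCFG_ENABLE, 0))
 *              return;
 *      ...reprogram the register...
 *      hme_mac_bitflip(sc, HME_MACI_RXCFG, v, 0, HME_MAC_RXCFG_ENABLE);
 */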
  607 
  608 /*
  609  * Initialization of interface; set up initialization block
  610  * and transmit/receive descriptor rings.
  611  */
  612 static void
  613 hme_init(void *xsc)
  614 {
  615         struct hme_softc *sc = (struct hme_softc *)xsc;
  616         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  617         u_int8_t *ea;
  618         u_int32_t v;
  619 
  620         /*
  621          * Initialization sequence. The numbered steps below correspond
  622          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  623          * Channel Engine manual (part of the PCIO manual).
  624          * See also the STP2002-STQ document from Sun Microsystems.
  625          */
  626 
  627         /* step 1 & 2. Reset the Ethernet Channel */
  628         hme_stop(sc);
  629 
  630         /* Re-initialize the MIF */
  631         hme_mifinit(sc);
  632 
  633         /* Call MI reset function if any */
  634         if (sc->sc_hwreset)
  635                 (*sc->sc_hwreset)(sc);
  636 
  637 #if 0
  638         /* Mask all MIF interrupts, just in case */
  639         HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
  640 #endif
  641 
  642         /* step 3. Setup data structures in host memory */
  643         if (hme_meminit(sc) != 0) {
  644                 device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
  645                 return;
  646         }
  647 
  648         /* step 4. TX MAC registers & counters */
  649         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  650         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  651         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  652         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  653         HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, ETHER_MAX_LEN);
  654 
  655         /* Load station MAC address */
  656         ea = sc->sc_arpcom.ac_enaddr;
  657         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
  658         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
  659         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
  660 
  661         /*
  662          * Init the seed for backoff from the MAC address, as the manual
  663          * suggests (it says the low 10 bits; the mask below keeps 14).
  664          */
  665         v = ((ea[4] << 8) | ea[5]) & 0x3fff;
  666         HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
  667 
  668 
  669         /* Note: Accepting power-on default for other MAC registers here.. */
  670 
  671         /* step 5. RX MAC registers & counters */
  672         hme_setladrf(sc, 0);
  673 
  674         /* step 6 & 7. Program Descriptor Ring Base Addresses */
  675         HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
  676         /* Transmit Descriptor ring size: in increments of 16 */
  677         HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
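        /* E.g., a 256 entry TX ring is encoded as 256 / 16 - 1 = 15. */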
  678 
  679         HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
  680         HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, ETHER_MAX_LEN);
  681 
  682         /* step 8. Global Configuration & Interrupt Mask */
  683         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
  684             ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
  685                 HME_SEB_STAT_HOSTTOTX |
  686                 HME_SEB_STAT_RXTOHOST |
  687                 HME_SEB_STAT_TXALL |
  688                 HME_SEB_STAT_TXPERR |
  689                 HME_SEB_STAT_RCNTEXP |
  690                 HME_SEB_STAT_ALL_ERRORS ));
  691 
  692         switch (sc->sc_burst) {
  693         default:
  694                 v = 0;
  695                 break;
  696         case 16:
  697                 v = HME_SEB_CFG_BURST16;
  698                 break;
  699         case 32:
  700                 v = HME_SEB_CFG_BURST32;
  701                 break;
  702         case 64:
  703                 v = HME_SEB_CFG_BURST64;
  704                 break;
  705         }
  706         HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
  707 
  708         /* step 9. ETX Configuration: use mostly default values */
  709 
  710         /* Enable DMA */
  711         v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
  712         v |= HME_ETX_CFG_DMAENABLE;
  713         HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
  714 
  715         /* step 10. ERX Configuration */
  716         v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
  717 
  718         /* Encode Receive Descriptor ring size: four possible values */
  719         v &= ~HME_ERX_CFG_RINGSIZEMSK;
  720         switch (HME_NRXDESC) {
  721         case 32:
  722                 v |= HME_ERX_CFG_RINGSIZE32;
  723                 break;
  724         case 64:
  725                 v |= HME_ERX_CFG_RINGSIZE64;
  726                 break;
  727         case 128:
  728                 v |= HME_ERX_CFG_RINGSIZE128;
  729                 break;
  730         case 256:
  731                 v |= HME_ERX_CFG_RINGSIZE256;
  732                 break;
  733         default:
  734                 printf("hme: invalid Receive Descriptor ring size\n");
  735                 break;
  736         }
  737 
  738         /* Enable DMA, fix RX first byte offset to 2. */
  739         v &= ~HME_ERX_CFG_FBO_MASK;
  740         v |= HME_ERX_CFG_DMAENABLE | (2 << HME_ERX_CFG_FBO_SHIFT);
  741         CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
  742         HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
  743 
  744         /* step 11. XIF Configuration */
  745         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
  746         v |= HME_MAC_XIF_OE;
  747         /* If an external transceiver is connected, enable its MII drivers */
  748         if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
  749                 v |= HME_MAC_XIF_MIIENABLE;
  750         CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
  751         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
  752 
  753         /* step 12. RX_MAC Configuration Register */
  754         v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
  755         v |= HME_MAC_RXCFG_ENABLE;
  756         v &= ~(HME_MAC_RXCFG_DCRCS);
  757         CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
  758         HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
  759 
  760         /* step 13. TX_MAC Configuration Register */
  761         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
  762         v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
  763         CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
  764         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
  765 
  766         /* step 14. Issue Transmit Pending command */
  767 
  768         /* Call MI initialization function if any */
  769         if (sc->sc_hwinit)
  770                 (*sc->sc_hwinit)(sc);
  771 
  772 #ifdef HMEDEBUG
  773         /* Debug: double-check. */
  774         CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
  775             "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
  776             HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
  777             HME_ERX_READ_4(sc, HME_ERXI_RING),
  778             HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
  779         CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
  780             HME_SEB_READ_4(sc, HME_SEBI_IMASK),
  781             HME_ERX_READ_4(sc, HME_ERXI_CFG),
  782             HME_ETX_READ_4(sc, HME_ETXI_CFG));
  783         CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
  784             HME_MAC_READ_4(sc, HME_MACI_RXCFG),
  785             HME_MAC_READ_4(sc, HME_MACI_TXCFG));
  786 #endif
  787 
  788         /* Start the one second timer. */
  789         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  790 
  791         ifp->if_flags |= IFF_RUNNING;
  792         ifp->if_flags &= ~IFF_OACTIVE;
  793         ifp->if_timer = 0;
  794         hme_start(ifp);
  795 }
  796 
  797 struct hme_txdma_arg {
  798         struct hme_softc *hta_sc;
  799         struct mbuf *hta_m;
  800         int hta_err;
  801         int hta_flags;
  802         int hta_offs;
  803         int hta_pad;
  804 };
  805 
  806 /* Values for hta_flags */
  807 #define HTAF_SOP        1       /* Start of packet (first mbuf in chain) */
  808 #define HTAF_EOP        2       /* End of packet (last mbuf in chain) */
  809 
  810 static void
  811 hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  812 {
  813         struct hme_txdma_arg *ta = xsc;
  814         struct hme_txdesc *td;
  815         bus_addr_t addr;
  816         bus_size_t sz;
  817         caddr_t txd;
  818         u_int32_t flags;
  819         int i, *tdhead, pci;
  820 
  821         ta->hta_err = error;
  822         if (error != 0)
  823                 return;
  824 
  825         tdhead = &ta->hta_sc->sc_rb.rb_tdhead;
  826         pci = ta->hta_sc->sc_pci;
  827         txd = ta->hta_sc->sc_rb.rb_txd;
  828         for (i = 0; i < nsegs; i++) {
  829                 if (ta->hta_sc->sc_rb.rb_td_nbusy == HME_NTXDESC) {
  830                         ta->hta_err = -1;
  831                         return;
  832                 }
  833                 td = &ta->hta_sc->sc_rb.rb_txdesc[*tdhead];
  834                 addr = segs[i].ds_addr;
  835                 sz = segs[i].ds_len;
  836                 if (i == 0) {
  837                         /* Adjust the offsets. */
  838                         addr += ta->hta_offs;
  839                         sz -= ta->hta_offs;
  840                         td->htx_flags = HTXF_MAPPED;
  841                 } else
  842                         td->htx_flags = 0;
  843                 if (i == nsegs - 1) {
  844                         /* Subtract the pad. */
  845                         if (sz < ta->hta_pad) {
  846                                 /*
  847                                  * Ooops. This should not have happened; it
  848                                  * means that we got a zero-size segment or
  849                                  * segment sizes were unnatural.
  850                                  */
  851                                 device_printf(ta->hta_sc->sc_dev,
  852                                     "hme_txdma_callback: alignment glitch\n");
  853                                 ta->hta_err = EINVAL;
  854                                 return;
  855                         }
  856                         sz -= ta->hta_pad;
  857                         /* If sz is 0 now, this does not matter. */
  858                 }
  859                 /* Fill the ring entry. */
  860                 flags = HME_XD_ENCODE_TSIZE(sz);
  861                 if ((ta->hta_flags & HTAF_SOP) != 0 && i == 0)
  862                         flags |= HME_XD_SOP;
  863                 if ((ta->hta_flags & HTAF_EOP) != 0 && i == nsegs - 1) {
  864                         flags |= HME_XD_EOP;
  865                         td->htx_m = ta->hta_m;
  866                 } else
  867                         td->htx_m = NULL;
  868                 CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
  869                     "flags %#x, addr %#x", i + 1, nsegs, *tdhead, (u_int)flags,
  870                     (u_int)addr);
  871                 HME_XD_SETFLAGS(pci, txd, *tdhead, flags);
  872                 HME_XD_SETADDR(pci, txd, *tdhead, addr);
  873 
  874                 ta->hta_sc->sc_rb.rb_td_nbusy++;
  875                 *tdhead = ((*tdhead) + 1) % HME_NTXDESC;
  876         }
  877 }
  878 
  879 /*
  880  * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and
  881  * start the transmission.
  882  * Returns 0 on success, -1 if there were not enough free descriptors to map
  883  * the packet, or an errno otherwise.
  884  */
  885 static int
  886 hme_load_mbuf(struct hme_softc *sc, struct mbuf *m0)
  887 {
  888         struct hme_txdma_arg cba;
  889         struct mbuf *m = m0, *n;
  890         struct hme_txdesc *td;
  891         char *start;
  892         int error, len, si, ri, totlen, sum;
  893         u_int32_t flags;
  894 
  895         if ((m->m_flags & M_PKTHDR) == 0)
  896                 panic("hme_load_mbuf: no packet header");
  897         totlen = m->m_pkthdr.len;
  898         sum = 0;
  899         si = sc->sc_rb.rb_tdhead;
  900         cba.hta_sc = sc;
  901         cba.hta_err = 0;
  902         cba.hta_flags = HTAF_SOP;
  903         cba.hta_m = m0;
  904         for (; m != NULL && sum < totlen; m = n) {
  905                 if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC) {
  906                         error = -1;
  907                         goto fail;
  908                 }
  909                 len = m->m_len;
  910                 n = m->m_next;
  911                 if (len == 0)
  912                         continue;
  913                 sum += len;
  914                 td = &sc->sc_rb.rb_txdesc[sc->sc_rb.rb_tdhead];
  915                 if (n == NULL || sum >= totlen)
  916                         cba.hta_flags |= HTAF_EOP;
  917                 /*
  918                  * This is slightly evil: we must map the buffer in a way that
  919                  * allows dma transfers to start on a natural burst boundary.
  920                  * This is done by rounding down the mapping address, and
  921                  * recording the required offset for the callback. With this,
  922                  * we cannot cross a page boundary because the burst size
  923                  * is a small power of two.
  924                  */
  925                 cba.hta_offs = mtod(m, uintptr_t) &
  926                     (sc->sc_burst - 1);
  927                 start = mtod(m, char *) - cba.hta_offs;
  928                 len += cba.hta_offs;
  929                 /*
  930                  * Similarly, the end of the mapping should be on a natural
  931                  * burst boundary. XXX: Let's hope that any segment ends
  932                  * generated by the busdma code are also on such boundaries.
  933                  */
  934                 cba.hta_pad = (sc->sc_burst - (((uintptr_t)start + len) &
  935                     (sc->sc_burst - 1))) % sc->sc_burst;
  936                 len += cba.hta_pad;
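                /*
                 * Worked example with sc_burst = 64: if the data starts 5
                 * bytes past a boundary, hta_offs = 5 and the mapping begins
                 * on the boundary; hta_pad then extends it to also end on a
                 * boundary. The callback adds hta_offs back and subtracts
                 * hta_pad, so the descriptors cover exactly the real data.
                 */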
  937                 /* Most of the work is done in the callback. */
  938                 if ((error = bus_dmamap_load(sc->sc_tdmatag, td->htx_dmamap,
  939                     start, len, hme_txdma_callback, &cba, 0)) != 0 ||
  940                     cba.hta_err != 0)
  941                         goto fail;
  942                 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  943                     BUS_DMASYNC_PREWRITE);
  944 
  945                 cba.hta_flags = 0;
  946         }
  947         /* Hand descriptor ownership to the chip, last to first (SOP last). */
  948         ri = sc->sc_rb.rb_tdhead;
  949         CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
  950             ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
  951         do {
  952                 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
  953                 flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
  954                     HME_XD_OWN;
  955                 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
  956                     ri, si, flags);
  957                 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
  958         } while (ri != si);
  959 
  960         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  961             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  962 
  963         /* start the transmission. */
  964         HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
  965         return (0);
  966 fail:
  967         for (ri = si; ri != sc->sc_rb.rb_tdhead; ri = (ri + 1) % HME_NTXDESC) {
  968                 td = &sc->sc_rb.rb_txdesc[ri];
  969                 if ((td->htx_flags & HTXF_MAPPED) != 0)
  970                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  971                 td->htx_flags = 0;
  972                 td->htx_m = NULL;
  973                 sc->sc_rb.rb_td_nbusy--;
  974                 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, 0);
  975         }
  976         sc->sc_rb.rb_tdhead = si;
  977         error = cba.hta_err != 0 ? cba.hta_err : error;
  978         if (error != -1)
  979                 device_printf(sc->sc_dev, "could not load mbuf: %d\n", error);
  980         return (error);
  981 }
  982 
  983 /*
  984  * Pass a packet to the higher levels.
  985  */
  986 static void
  987 hme_read(struct hme_softc *sc, int ix, int len)
  988 {
  989         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  990         struct mbuf *m;
  991         int offs;
  992 
  993         if (len <= sizeof(struct ether_header) ||
  994             len > ETHERMTU + sizeof(struct ether_header)) {
  995 #ifdef HMEDEBUG
  996                 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
  997                     len);
  998 #endif
  999                 ifp->if_ierrors++;
 1000                 hme_discard_rxbuf(sc, ix, 1);
 1001                 return;
 1002         }
 1003 
 1004         m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
 1005         offs = sc->sc_rb.rb_rxdesc[ix].hrx_offs;
 1006         CTR2(KTR_HME, "hme_read: offs %d, len %d", offs, len);
 1007 
 1008         if (hme_add_rxbuf(sc, ix, 0) != 0) {
 1009                 /*
 1010                  * hme_add_rxbuf will leave the old buffer in the ring until
 1011                  * it is sure that a new buffer can be mapped. If it can not,
 1012                  * drop the packet, but leave the interface up.
 1013                  */
 1014                 ifp->if_iqdrops++;
 1015                 hme_discard_rxbuf(sc, ix, 1);
 1016                 return;
 1017         }
 1018 
 1019         ifp->if_ipackets++;
 1020 
 1021         /* Changed the rings; sync. */
 1022         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1023             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1024 
 1025         m->m_pkthdr.rcvif = ifp;
 1026         m->m_pkthdr.len = m->m_len = len + offs;
 1027         m_adj(m, offs);
 1028         /* Pass the packet up. */
 1029         (*ifp->if_input)(ifp, m);
 1030 }
 1031 
 1032 static void
 1033 hme_start(struct ifnet *ifp)
 1034 {
 1035         struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
 1036         struct mbuf *m;
 1037         int error, enq = 0;
 1038 
 1039         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
 1040                 return;
 1041 
 1042         error = 0;
 1043         for (;;) {
 1044                 IF_DEQUEUE(&ifp->if_snd, m);
 1045                 if (m == NULL)
 1046                         break;
 1047 
 1048                 error = hme_load_mbuf(sc, m);
 1049                 if (error != 0) {
 1050                         ifp->if_flags |= IFF_OACTIVE;
 1051                         IF_PREPEND(&ifp->if_snd, m);
 1052                         break;
 1053                 } else {
 1054                         enq = 1;
 1055                         BPF_MTAP(ifp, m);
 1056                 }
 1057         }
 1058 
 1059         if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
 1060                 ifp->if_flags |= IFF_OACTIVE;
 1061         /* Set watchdog timer if a packet was queued */
 1062         if (enq)
 1063                 ifp->if_timer = 5;
 1064 }
 1065 
 1066 /*
 1067  * Transmit interrupt.
 1068  */
 1069 static void
 1070 hme_tint(struct hme_softc *sc)
 1071 {
 1072         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1073         struct hme_txdesc *td;
 1074         unsigned int ri, txflags;
 1075 
 1076         /*
 1077          * Unload collision counters
 1078          */
 1079         ifp->if_collisions +=
 1080                 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
 1081                 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
 1082                 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
 1083                 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
 1084 
 1085         /*
 1086          * then clear the hardware counters.
 1087          */
 1088         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
 1089         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
 1090         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
 1091         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
 1092 
 1093         /* Fetch current position in the transmit ring */
 1094         for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
 1095                 if (sc->sc_rb.rb_td_nbusy <= 0) {
 1096                         CTR0(KTR_HME, "hme_tint: not busy!");
 1097                         break;
 1098                 }
 1099 
 1100                 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
 1101                 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
 1102 
 1103                 if ((txflags & HME_XD_OWN) != 0)
 1104                         break;
 1105 
 1106                 td = &sc->sc_rb.rb_txdesc[ri];
 1107                 CTR1(KTR_HME, "hme_tint: not owned, dflags %#x", td->htx_flags);
 1108                 if ((td->htx_flags & HTXF_MAPPED) != 0) {
 1109                         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
 1110                             BUS_DMASYNC_POSTWRITE);
 1111                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
 1112                 }
 1113                 td->htx_flags = 0;
 1114                 --sc->sc_rb.rb_td_nbusy;
 1115                 ifp->if_flags &= ~IFF_OACTIVE;
 1116 
 1117                 /* Complete packet transmitted? */
 1118                 if ((txflags & HME_XD_EOP) == 0)
 1119                         continue;
 1120 
 1121                 ifp->if_opackets++;
 1122                 m_freem(td->htx_m);
 1123                 td->htx_m = NULL;
 1124         }
 1125         /* Turn off watchdog */
 1126         if (sc->sc_rb.rb_td_nbusy == 0)
 1127                 ifp->if_timer = 0;
 1128 
 1129         /* Update ring */
 1130         sc->sc_rb.rb_tdtail = ri;
 1131 
 1132         hme_start(ifp);
 1133 
 1134         if (sc->sc_rb.rb_td_nbusy == 0)
 1135                 ifp->if_timer = 0;
 1136 }
 1137 
 1138 /*
 1139  * Receive interrupt.
 1140  */
 1141 static void
 1142 hme_rint(struct hme_softc *sc)
 1143 {
 1144         caddr_t xdr = sc->sc_rb.rb_rxd;
 1145         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1146         unsigned int ri, len;
 1147         u_int32_t flags;
 1148 
 1149         /*
 1150          * Process all buffers with valid data.
 1151          */
 1152         for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
 1153                 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
 1154                 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
 1155                 if ((flags & HME_XD_OWN) != 0)
 1156                         break;
 1157 
 1158                 if ((flags & HME_XD_OFL) != 0) {
 1159                         device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
 1160                             "flags=0x%x\n", ri, flags);
 1161                         ifp->if_ierrors++;
 1162                         hme_discard_rxbuf(sc, ri, 1);
 1163                 } else {
 1164                         len = HME_XD_DECODE_RSIZE(flags);
 1165                         hme_read(sc, ri, len);
 1166                 }
 1167         }
 1168 
 1169         sc->sc_rb.rb_rdtail = ri;
 1170 }
 1171 
 1172 static void
 1173 hme_eint(struct hme_softc *sc, u_int status)
 1174 {
 1175 
 1176         if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
 1177                 device_printf(sc->sc_dev, "XXX: link status changed\n");
 1178                 return;
 1179         }
 1180 
 1181         HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 1182 }
 1183 
 1184 void
 1185 hme_intr(void *v)
 1186 {
 1187         struct hme_softc *sc = (struct hme_softc *)v;
 1188         u_int32_t status;
 1189 
 1190         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1191         CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 1192 
 1193         if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 1194                 hme_eint(sc, status);
 1195 
 1196         if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 1197                 hme_tint(sc);
 1198 
 1199         if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 1200                 hme_rint(sc);
 1201 }
 1202 
 1203 
 1204 static void
 1205 hme_watchdog(struct ifnet *ifp)
 1206 {
 1207         struct hme_softc *sc = ifp->if_softc;
 1208 #ifdef HMEDEBUG
 1209         u_int32_t status;
 1210 
 1211         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1212         CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
 1213 #endif
 1214         device_printf(sc->sc_dev, "device timeout\n");
 1215         ++ifp->if_oerrors;
 1216 
 1217         hme_reset(sc);
 1218 }
 1219 
 1220 /*
 1221  * Initialize the MII Management Interface
 1222  */
 1223 static void
 1224 hme_mifinit(struct hme_softc *sc)
 1225 {
 1226         u_int32_t v;
 1227 
 1228         /* Configure the MIF in frame mode */
 1229         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1230         v &= ~HME_MIF_CFG_BBMODE;
 1231         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1232 }
 1233 
 1234 /*
 1235  * MII interface
 1236  */
 1237 int
 1238 hme_mii_readreg(device_t dev, int phy, int reg)
 1239 {
 1240         struct hme_softc *sc = device_get_softc(dev);
 1241         int n;
 1242         u_int32_t v;
 1243 
 1244         /* Select the desired PHY in the MIF configuration register */
 1245         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1246         /* Clear PHY select bit */
 1247         v &= ~HME_MIF_CFG_PHY;
 1248         if (phy == HME_PHYAD_EXTERNAL)
 1249                 /* Set PHY select bit to get at external device */
 1250                 v |= HME_MIF_CFG_PHY;
 1251         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1252 
 1253         /* Construct the frame command */
 1254         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 1255             HME_MIF_FO_TAMSB |
 1256             (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 1257             (phy << HME_MIF_FO_PHYAD_SHIFT) |
 1258             (reg << HME_MIF_FO_REGAD_SHIFT);
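
        /*
         * This is a standard IEEE 802.3 clause 22 management frame,
         * <ST><OP><PHYAD><REGAD><TA><DATA>, written to the MIF frame/output
         * register; the chip shifts it out on MDIO and sets the turnaround
         * LSB (HME_MIF_FO_TALSB) when the read data below is valid.
         */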
 1259 
 1260         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1261         for (n = 0; n < 100; n++) {
 1262                 DELAY(1);
 1263                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1264                 if (v & HME_MIF_FO_TALSB)
 1265                         return (v & HME_MIF_FO_DATA);
 1266         }
 1267 
 1268         device_printf(sc->sc_dev, "mii_read timeout\n");
 1269         return (0);
 1270 }
 1271 
 1272 int
 1273 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 1274 {
 1275         struct hme_softc *sc = device_get_softc(dev);
 1276         int n;
 1277         u_int32_t v;
 1278 
 1279         /* Select the desired PHY in the MIF configuration register */
 1280         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1281         /* Clear PHY select bit */
 1282         v &= ~HME_MIF_CFG_PHY;
 1283         if (phy == HME_PHYAD_EXTERNAL)
 1284                 /* Set PHY select bit to get at external device */
 1285                 v |= HME_MIF_CFG_PHY;
 1286         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1287 
 1288         /* Construct the frame command */
 1289         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)  |
 1290             HME_MIF_FO_TAMSB                            |
 1291             (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 1292             (phy << HME_MIF_FO_PHYAD_SHIFT)             |
 1293             (reg << HME_MIF_FO_REGAD_SHIFT)             |
 1294             (val & HME_MIF_FO_DATA);
 1295 
 1296         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1297         for (n = 0; n < 100; n++) {
 1298                 DELAY(1);
 1299                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1300                 if (v & HME_MIF_FO_TALSB)
 1301                         return (1);
 1302         }
 1303 
 1304         device_printf(sc->sc_dev, "mii_write timeout\n");
 1305         return (0);
 1306 }
 1307 
 1308 void
 1309 hme_mii_statchg(device_t dev)
 1310 {
 1311         struct hme_softc *sc = device_get_softc(dev);
 1312         int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
 1313         int phy = sc->sc_phys[instance];
 1314         u_int32_t v;
 1315 
 1316 #ifdef HMEDEBUG
 1317         if (sc->sc_debug)
 1318                 printf("hme_mii_statchg: status change: phy = %d\n", phy);
 1319 #endif
 1320 
 1321         /* Select the current PHY in the MIF configuration register */
 1322         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1323         v &= ~HME_MIF_CFG_PHY;
 1324         if (phy == HME_PHYAD_EXTERNAL)
 1325                 v |= HME_MIF_CFG_PHY;
 1326         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1327 
 1328         /* Set the MAC Full Duplex bit appropriately */
 1329         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 1330         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
 1331                 return;
 1332         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 1333                 v |= HME_MAC_TXCFG_FULLDPLX;
 1334         else
 1335                 v &= ~HME_MAC_TXCFG_FULLDPLX;
 1336         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
 1337         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
 1338                 return;
 1339 }
 1340 
 1341 static int
 1342 hme_mediachange(struct ifnet *ifp)
 1343 {
 1344         struct hme_softc *sc = ifp->if_softc;
 1345 
 1346         return (mii_mediachg(sc->sc_mii));
 1347 }
 1348 
 1349 static void
 1350 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1351 {
 1352         struct hme_softc *sc = ifp->if_softc;
 1353 
 1354         if ((ifp->if_flags & IFF_UP) == 0)
 1355                 return;
 1356 
 1357         mii_pollstat(sc->sc_mii);
 1358         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 1359         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 1360 }
 1361 
 1362 /*
 1363  * Process an ioctl request.
 1364  */
 1365 static int
 1366 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1367 {
 1368         struct hme_softc *sc = ifp->if_softc;
 1369         struct ifreq *ifr = (struct ifreq *)data;
 1370         int s, error = 0;
 1371 
 1372         s = splnet();
 1373 
 1374         switch (cmd) {
 1375         case SIOCSIFFLAGS:
 1376                 if ((ifp->if_flags & IFF_UP) == 0 &&
 1377                     (ifp->if_flags & IFF_RUNNING) != 0) {
 1378                         /*
 1379                          * If interface is marked down and it is running, then
 1380                          * stop it.
 1381                          */
 1382                         hme_stop(sc);
 1383                         ifp->if_flags &= ~IFF_RUNNING;
 1384                 } else if ((ifp->if_flags & IFF_UP) != 0 &&
 1385                            (ifp->if_flags & IFF_RUNNING) == 0) {
 1386                         /*
 1387                          * If interface is marked up and it is stopped, then
 1388                          * start it.
 1389                          */
 1390                         hme_init(sc);
 1391                 } else if ((ifp->if_flags & IFF_UP) != 0) {
 1392                         /*
 1393                          * Reset the interface to pick up changes in any other
 1394                          * flags that affect hardware registers.
 1395                          */
 1396                         /*hme_stop(sc);*/
 1397                         hme_init(sc);
 1398                 }
 1399 #ifdef HMEDEBUG
 1400                 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
 1401 #endif
 1402                 break;
 1403 
 1404         case SIOCADDMULTI:
 1405         case SIOCDELMULTI:
 1406                 hme_setladrf(sc, 1);
 1407                 error = 0;
 1408                 break;
 1409         case SIOCGIFMEDIA:
 1410         case SIOCSIFMEDIA:
 1411                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 1412                 break;
 1413         default:
 1414                 error = ether_ioctl(ifp, cmd, data);
 1415                 break;
 1416         }
 1417 
 1418         splx(s);
 1419         return (error);
 1420 }
 1421 
 1422 #if 0
 1423 static void
 1424 hme_shutdown(void *arg)
 1425 {
 1426 
 1427         hme_stop((struct hme_softc *)arg);
 1428 }
 1429 #endif
 1430 
 1431 /*
 1432  * Set up the logical address filter.
 1433  */
 1434 static void
 1435 hme_setladrf(struct hme_softc *sc, int reenable)
 1436 {
 1437         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1438         struct ifmultiaddr *inm;
 1439         struct sockaddr_dl *sdl;
 1440         u_char *cp;
 1441         u_int32_t crc;
 1442         u_int32_t hash[4];
 1443         u_int32_t macc;
 1444         int len;
 1445 
 1446         /* Clear hash table */
 1447         hash[3] = hash[2] = hash[1] = hash[0] = 0;
 1448 
 1449         /* Get current RX configuration */
 1450         macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1451 
 1452         /*
 1453          * Disable the receiver while changing its state, as the
 1454          * documentation mandates.
 1455          * We must then wait until the bit clears in the register. This should
 1456          * take at most 3.5ms.
 1457          */
 1458         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
 1459                 return;
 1460         /* Disable the hash filter before writing to the filter registers. */
 1461         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1462             HME_MAC_RXCFG_HENABLE, 0))
 1463                 return;
 1464 
 1465         if (reenable)
 1466                 macc |= HME_MAC_RXCFG_ENABLE;
 1467         else
 1468                 macc &= ~HME_MAC_RXCFG_ENABLE;
 1469 
 1470         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 1471                 /* Turn on promiscuous mode; turn off the hash filter */
 1472                 macc |= HME_MAC_RXCFG_PMISC;
 1473                 macc &= ~HME_MAC_RXCFG_HENABLE;
 1474                 ifp->if_flags |= IFF_ALLMULTI;
 1475                 goto chipit;
 1476         }
 1477 
 1478         /* Turn off promiscuous mode; turn on the hash filter */
 1479         macc &= ~HME_MAC_RXCFG_PMISC;
 1480         macc |= HME_MAC_RXCFG_HENABLE;
 1481 
 1482         /*
 1483          * Set up multicast address filter by passing all multicast addresses
 1484          * through a crc generator, and then using the high order 6 bits as an
 1485          * index into the 64 bit logical address filter.  The two high order
 1486          * bits select one of the four 16 bit words, while the remaining four
 1487          * bits select the bit within that word.
 1488          */
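        /*
         * Worked example: a 6 bit index of 0x2d (binary 101101) selects
         * word 0x2d >> 4 = 2 and bit 0x2d & 0xf = 13, i.e. bit 13 of
         * HME_MACI_HASHTAB2 written below.
         */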
 1489 
 1490         TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
 1491                 if (inm->ifma_addr->sa_family != AF_LINK)
 1492                         continue;
 1493                 sdl = (struct sockaddr_dl *)inm->ifma_addr;
 1494                 cp = LLADDR(sdl);
 1495                 crc = 0xffffffff;
 1496                 for (len = sdl->sdl_alen; --len >= 0;) {
 1497                         int octet = *cp++;
 1498                         int i;
 1499 
 1500 #define MC_POLY_LE      0xedb88320UL    /* mcast crc, little endian */
 1501                         for (i = 0; i < 8; i++) {
 1502                                 if ((crc & 1) ^ (octet & 1)) {
 1503                                         crc >>= 1;
 1504                                         crc ^= MC_POLY_LE;
 1505                                 } else {
 1506                                         crc >>= 1;
 1507                                 }
 1508                                 octet >>= 1;
 1509                         }
 1510                 }
 1511                 /* Just want the 6 most significant bits. */
 1512                 crc >>= 26;
 1513 
 1514                 /* Set the corresponding bit in the filter. */
 1515                 hash[crc >> 4] |= 1 << (crc & 0xf);
 1516         }
 1517 
 1518         ifp->if_flags &= ~IFF_ALLMULTI;
 1519 
 1520 chipit:
 1521         /* Now load the hash table into the chip */
 1522         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 1523         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 1524         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 1525         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 1526         hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 1527             macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
 1528 }
