FreeBSD/Linux Kernel Cross Reference
sys/dev/hme/if_hme.c


    1 /*-
    2  * Copyright (c) 1999 The NetBSD Foundation, Inc.
    3  * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software contributed to The NetBSD Foundation
    7  * by Paul Kranenburg.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *        This product includes software developed by the NetBSD
   20  *        Foundation, Inc. and its contributors.
   21  * 4. Neither the name of The NetBSD Foundation nor the names of its
   22  *    contributors may be used to endorse or promote products derived
   23  *    from this software without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  *
   37  *      from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD: releng/5.2/sys/dev/hme/if_hme.c 121816 2003-10-31 18:32:15Z brooks $");
   42 
   43 /*
   44  * HME Ethernet module driver.
   45  *
    46  * The HME is part of, e.g., the PCIO PCI multi function device.
    47  * It supports TX gathering and TX and RX checksum offloading.
    48  * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
    49  * for this offset: mbuf clusters are usually aligned to 2^11-byte boundaries,
    50  * so skipping 2 bytes makes the header that follows the 14-byte ethernet
    51  * header start on a natural boundary, minimizing wastage in the common case.
   52  *
   53  * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
   54  * maximum packet size (this is not verified). Buffers starting on odd
   55  * boundaries must be mapped so that the burst can start on a natural boundary.
   56  *
   57  * Checksumming is not yet supported.
   58  */
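
       /*
        * Illustrative arithmetic for the 2-byte offset chosen above (assuming
        * HME_RXOFFS from if_hmevar.h is that offset, as its use below
        * suggests): a 2^11-aligned cluster plus an offset of 2 places the
        * 14-byte ethernet header at offsets 2..15, so the IP (or other)
        * header that follows begins at offset 16, a natural boundary.
        */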
   59 
   60 #define HMEDEBUG
   61 #define KTR_HME         KTR_CT2         /* XXX */
   62 
   63 #include <sys/param.h>
   64 #include <sys/systm.h>
   65 #include <sys/bus.h>
   66 #include <sys/endian.h>
   67 #include <sys/kernel.h>
   68 #include <sys/ktr.h>
   69 #include <sys/mbuf.h>
   70 #include <sys/malloc.h>
   71 #include <sys/socket.h>
   72 #include <sys/sockio.h>
   73 
   74 #include <net/bpf.h>
   75 #include <net/ethernet.h>
   76 #include <net/if.h>
   77 #include <net/if_arp.h>
   78 #include <net/if_dl.h>
   79 #include <net/if_media.h>
   80 
   81 #include <dev/mii/mii.h>
   82 #include <dev/mii/miivar.h>
   83 
   84 #include <machine/bus.h>
   85 
   86 #include <dev/hme/if_hmereg.h>
   87 #include <dev/hme/if_hmevar.h>
   88 
   89 static void     hme_start(struct ifnet *);
   90 static void     hme_stop(struct hme_softc *);
   91 static int      hme_ioctl(struct ifnet *, u_long, caddr_t);
   92 static void     hme_tick(void *);
   93 static void     hme_watchdog(struct ifnet *);
   94 static void     hme_init(void *);
   95 static int      hme_add_rxbuf(struct hme_softc *, unsigned int, int);
   96 static int      hme_meminit(struct hme_softc *);
   97 static int      hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
   98     u_int32_t, u_int32_t);
   99 static void     hme_mifinit(struct hme_softc *);
  100 static void     hme_reset(struct hme_softc *);
  101 static void     hme_setladrf(struct hme_softc *, int);
  102 
  103 static int      hme_mediachange(struct ifnet *);
  104 static void     hme_mediastatus(struct ifnet *, struct ifmediareq *);
  105 
  106 static int      hme_load_txmbuf(struct hme_softc *, struct mbuf *);
  107 static void     hme_read(struct hme_softc *, int, int);
  108 static void     hme_eint(struct hme_softc *, u_int);
  109 static void     hme_rint(struct hme_softc *);
  110 static void     hme_tint(struct hme_softc *);
  111 
  112 static void     hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
  113 static void     hme_rxdma_callback(void *, bus_dma_segment_t *, int,
  114     bus_size_t, int);
  115 static void     hme_txdma_callback(void *, bus_dma_segment_t *, int,
  116     bus_size_t, int);
  117 
  118 devclass_t hme_devclass;
  119 
  120 static int hme_nerr;
  121 
  122 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
  123 MODULE_DEPEND(hme, miibus, 1, 1, 1);
  124 
  125 #define HME_SPC_READ_4(spc, sc, offs) \
  126         bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  127             (sc)->sc_ ## spc ## o + (offs))
  128 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
  129         bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  130             (sc)->sc_ ## spc ## o + (offs), (v))
  131 
  132 #define HME_SEB_READ_4(sc, offs)        HME_SPC_READ_4(seb, (sc), (offs))
  133 #define HME_SEB_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(seb, (sc), (offs), (v))
  134 #define HME_ERX_READ_4(sc, offs)        HME_SPC_READ_4(erx, (sc), (offs))
  135 #define HME_ERX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(erx, (sc), (offs), (v))
  136 #define HME_ETX_READ_4(sc, offs)        HME_SPC_READ_4(etx, (sc), (offs))
  137 #define HME_ETX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(etx, (sc), (offs), (v))
  138 #define HME_MAC_READ_4(sc, offs)        HME_SPC_READ_4(mac, (sc), (offs))
  139 #define HME_MAC_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mac, (sc), (offs), (v))
  140 #define HME_MIF_READ_4(sc, offs)        HME_SPC_READ_4(mif, (sc), (offs))
  141 #define HME_MIF_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mif, (sc), (offs), (v))
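
       /*
        * For illustration, HME_SEB_READ_4(sc, HME_SEBI_RESET) expands (via
        * token pasting) to:
        *
        *	bus_space_read_4(sc->sc_sebt, sc->sc_sebh,
        *	    sc->sc_sebo + HME_SEBI_RESET)
        *
        * so each register space is accessed through its own tag, handle and
        * offset.
        */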
  142 
  143 #define HME_MAXERR      5
  144 #define HME_WHINE(dev, ...) do {                                        \
  145         if (hme_nerr++ < HME_MAXERR)                                    \
  146                 device_printf(dev, __VA_ARGS__);                        \
  147         if (hme_nerr == HME_MAXERR) {                                   \
   148                 device_printf(dev, "too many errors; not reporting any " \
  149                     "more\n");                                          \
  150         }                                                               \
   151 } while (0)
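
       /*
        * Illustrative use, as in hme_eint() below:
        *
        *	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
        *
        * This prints via device_printf() for the first HME_MAXERR errors,
        * emits one final "too many errors" notice, and is silent afterwards.
        */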
  152 
  153 int
  154 hme_config(struct hme_softc *sc)
  155 {
  156         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  157         struct mii_softc *child;
  158         bus_size_t size;
  159         int error, rdesc, tdesc, i;
  160 
  161         /*
  162          * HME common initialization.
  163          *
  164          * hme_softc fields that must be initialized by the front-end:
  165          *
  166          * the dma bus tag:
  167          *      sc_dmatag
  168          *
   169          * the bus handles, tags and offsets (split for SBus compatibility):
  170          *      sc_seb{t,h,o}   (Shared Ethernet Block registers)
  171          *      sc_erx{t,h,o}   (Receiver Unit registers)
  172          *      sc_etx{t,h,o}   (Transmitter Unit registers)
  173          *      sc_mac{t,h,o}   (MAC registers)
   174          *      sc_mif{t,h,o}   (Management Interface registers)
  175          *
  176          * the maximum bus burst size:
  177          *      sc_burst
  178          *
  179          */
  180 
  181         /* Make sure the chip is stopped. */
  182         hme_stop(sc);
  183 
  184         /*
  185          * Allocate DMA capable memory
  186          * Buffer descriptors must be aligned on a 2048 byte boundary;
  187          * take this into account when calculating the size. Note that
  188          * the maximum number of descriptors (256) occupies 2048 bytes,
  189          * so we allocate that much regardless of HME_N*DESC.
  190          */
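               /*
                * Illustrative arithmetic (each descriptor is two 32-bit
                * words, i.e. 8 bytes, cf. the flags/address reads at offsets
                * 0 and 4 in hme_meminit() below): 256 descriptors * 8 bytes
                * = 2048 bytes per ring, and one such region each for the TX
                * and RX rings gives the 4096 here.
                */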
   191         size = 4096;
  192 
  193         error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  194             BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
  195             BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
  196         if (error)
  197                 return (error);
  198 
  199         error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
  200             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
  201             1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
  202             &Giant, &sc->sc_cdmatag);
  203         if (error)
  204                 goto fail_ptag;
  205 
  206         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  207             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  208             HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
  209             NULL, NULL, &sc->sc_rdmatag);
  210         if (error)
  211                 goto fail_ctag;
  212 
  213         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  214             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  215             HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
  216             NULL, NULL, &sc->sc_tdmatag);
  217         if (error)
  218                 goto fail_rtag;
  219 
  220         /* Allocate control/TX DMA buffer */
  221         error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
  222             0, &sc->sc_cdmamap);
  223         if (error != 0) {
  224                 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
  225                 goto fail_ttag;
  226         }
  227 
  228         /* Load the buffer */
  229         sc->sc_rb.rb_dmabase = 0;
  230         if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
  231              sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
  232             sc->sc_rb.rb_dmabase == 0) {
  233                 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
  234                     error);
  235                 goto fail_free;
  236         }
  237         CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
  238             sc->sc_rb.rb_dmabase);
  239 
  240         /*
   241          * Prepare the RX descriptors. rdesc serves as a marker for the last
  242          * processed descriptor and may be used later on.
  243          */
  244         for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
  245                 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
  246                 error = bus_dmamap_create(sc->sc_rdmatag, 0,
  247                     &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
  248                 if (error != 0)
  249                         goto fail_rxdesc;
  250         }
  251         error = bus_dmamap_create(sc->sc_rdmatag, 0,
  252             &sc->sc_rb.rb_spare_dmamap);
  253         if (error != 0)
  254                 goto fail_rxdesc;
  255         /* Same for the TX descs. */
  256         for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
  257                 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
  258                 error = bus_dmamap_create(sc->sc_tdmatag, 0,
  259                     &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
  260                 if (error != 0)
  261                         goto fail_txdesc;
  262         }
  263 
  264         device_printf(sc->sc_dev, "Ethernet address:");
  265         for (i = 0; i < 6; i++)
  266                 printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);
  267         printf("\n");
  268 
  269         /* Initialize ifnet structure. */
  270         ifp->if_softc = sc;
  271         if_initname(ifp, device_get_name(sc->sc_dev),
  272             device_get_unit(sc->sc_dev));
  273         ifp->if_mtu = ETHERMTU;
   274         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  275         ifp->if_start = hme_start;
  276         ifp->if_ioctl = hme_ioctl;
  277         ifp->if_init = hme_init;
  278         ifp->if_output = ether_output;
  279         ifp->if_watchdog = hme_watchdog;
  280         ifp->if_snd.ifq_maxlen = HME_NTXQ;
  281 
  282         hme_mifinit(sc);
  283 
  284         if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
  285             hme_mediastatus)) != 0) {
  286                 device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
  287                 goto fail_rxdesc;
  288         }
  289         sc->sc_mii = device_get_softc(sc->sc_miibus);
  290 
  291         /*
  292          * Walk along the list of attached MII devices and
  293          * establish an `MII instance' to `phy number'
  294          * mapping. We'll use this mapping in media change
  295          * requests to determine which phy to use to program
  296          * the MIF configuration register.
  297          */
  298         for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
  299              child = LIST_NEXT(child, mii_list)) {
  300                 /*
  301                  * Note: we support just two PHYs: the built-in
  302                  * internal device and an external on the MII
  303                  * connector.
  304                  */
  305                 if (child->mii_phy > 1 || child->mii_inst > 1) {
   306                         device_printf(sc->sc_dev, "cannot accommodate "
  307                             "MII device %s at phy %d, instance %d\n",
  308                             device_get_name(child->mii_dev),
  309                             child->mii_phy, child->mii_inst);
  310                         continue;
  311                 }
  312 
  313                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  314         }
  315 
  316         /* Attach the interface. */
  317         ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);
  318 
  319         callout_init(&sc->sc_tick_ch, 0);
  320         return (0);
  321 
  322 fail_txdesc:
  323         for (i = 0; i < tdesc; i++) {
  324                 bus_dmamap_destroy(sc->sc_tdmatag,
  325                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  326         }
  327         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  328 fail_rxdesc:
  329         for (i = 0; i < rdesc; i++) {
  330                 bus_dmamap_destroy(sc->sc_rdmatag,
  331                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  332         }
  333         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  334 fail_free:
  335         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  336 fail_ttag:
  337         bus_dma_tag_destroy(sc->sc_tdmatag);
  338 fail_rtag:
  339         bus_dma_tag_destroy(sc->sc_rdmatag);
  340 fail_ctag:
  341         bus_dma_tag_destroy(sc->sc_cdmatag);
  342 fail_ptag:
  343         bus_dma_tag_destroy(sc->sc_pdmatag);
  344         return (error);
  345 }
  346 
  347 void
  348 hme_detach(struct hme_softc *sc)
  349 {
  350         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  351         int i;
  352 
  353         ether_ifdetach(ifp);
  354         hme_stop(sc);
  355         device_delete_child(sc->sc_dev, sc->sc_miibus);
  356 
  357         for (i = 0; i < HME_NTXQ; i++) {
  358                 bus_dmamap_destroy(sc->sc_tdmatag,
  359                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  360         }
  361         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  362         for (i = 0; i < HME_NRXDESC; i++) {
  363                 bus_dmamap_destroy(sc->sc_rdmatag,
  364                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  365         }
  366         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
  367         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
  368         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  369         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  370         bus_dma_tag_destroy(sc->sc_tdmatag);
  371         bus_dma_tag_destroy(sc->sc_rdmatag);
  372         bus_dma_tag_destroy(sc->sc_cdmatag);
  373         bus_dma_tag_destroy(sc->sc_pdmatag);
  374 }
  375 
  376 void
  377 hme_suspend(struct hme_softc *sc)
  378 {
  379 
  380         hme_stop(sc);
  381 }
  382 
  383 void
  384 hme_resume(struct hme_softc *sc)
  385 {
  386         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  387 
  388         if ((ifp->if_flags & IFF_UP) != 0)
  389                 hme_init(ifp);
  390 }
  391 
  392 static void
  393 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  394 {
  395         struct hme_softc *sc = (struct hme_softc *)xsc;
  396 
  397         if (error != 0)
  398                 return;
  399         KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
  400         sc->sc_rb.rb_dmabase = segs[0].ds_addr;
  401 }
  402 
  403 static void
  404 hme_tick(void *arg)
  405 {
  406         struct hme_softc *sc = arg;
  407         int s;
  408 
  409         s = splnet();
  410         mii_tick(sc->sc_mii);
  411         splx(s);
  412 
  413         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  414 }
  415 
  416 static void
  417 hme_reset(struct hme_softc *sc)
  418 {
  419         int s;
  420 
  421         s = splnet();
  422         hme_init(sc);
  423         splx(s);
  424 }
  425 
  426 static void
  427 hme_stop(struct hme_softc *sc)
  428 {
  429         u_int32_t v;
  430         int n;
  431 
  432         callout_stop(&sc->sc_tick_ch);
  433 
  434         /* Reset transmitter and receiver */
  435         HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
  436             HME_SEB_RESET_ERX);
  437 
  438         for (n = 0; n < 20; n++) {
  439                 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
  440                 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
  441                         return;
  442                 DELAY(20);
  443         }
  444 
  445         device_printf(sc->sc_dev, "hme_stop: reset failed\n");
  446 }
  447 
  448 static void
  449 hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
  450     bus_size_t totsize, int error)
  451 {
  452         bus_addr_t *a = xsc;
  453 
  454         KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
  455         if (error != 0)
  456                 return;
  457         *a = segs[0].ds_addr;
  458 }
  459 
  460 /*
   461  * Discard the contents of an mbuf in the RX ring, leaving the buffer in the
   462  * ring available for reuse.
  463  */
  464 static __inline void
  465 hme_discard_rxbuf(struct hme_softc *sc, int ix)
  466 {
  467 
  468         /*
   469          * A packet was dropped; reinitialize the descriptor and turn the
   470          * ownership back to the hardware.
  471          */
  472         HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
  473             HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
  474 }
  475 
  476 static int
  477 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
  478 {
  479         struct hme_rxdesc *rd;
  480         struct mbuf *m;
  481         bus_addr_t ba;
  482         bus_dmamap_t map;
  483         uintptr_t b;
  484         int a, unmap;
  485 
  486         rd = &sc->sc_rb.rb_rxdesc[ri];
  487         unmap = rd->hrx_m != NULL;
  488         if (unmap && keepold) {
  489                 /*
  490                  * Reinitialize the descriptor flags, as they may have been
  491                  * altered by the hardware.
  492                  */
  493                 hme_discard_rxbuf(sc, ri);
  494                 return (0);
  495         }
  496         if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
  497                 return (ENOBUFS);
  498         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
  499         b = mtod(m, uintptr_t);
  500         /*
  501          * Required alignment boundary. At least 16 is needed, but since
  502          * the mapping must be done in a way that a burst can start on a
  503          * natural boundary we might need to extend this.
  504          */
  505         a = max(HME_MINRXALIGN, sc->sc_burst);
  506         /*
   507          * Make sure the buffer is suitably aligned. The 2 byte offset is removed
  508          * when the mbuf is handed up. XXX: this ensures at least 16 byte
  509          * alignment of the header adjacent to the ethernet header, which
  510          * should be sufficient in all cases. Nevertheless, this second-guesses
  511          * ALIGN().
  512          */
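               /*
                * Worked example: with a = 16 and b = 0x1002, roundup2(b, a)
                * = 0x1010, so m_adj() below trims 14 bytes and the payload
                * becomes 16-byte aligned. (For a cluster already aligned to
                * a the trim is simply 0.)
                */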
  513         m_adj(m, roundup2(b, a) - b);
  514         if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
  515             m, hme_rxdma_callback, &ba, 0) != 0) {
  516                 m_freem(m);
  517                 return (ENOBUFS);
  518         }
  519         if (unmap) {
  520                 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
  521                     BUS_DMASYNC_POSTREAD);
  522                 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
  523         }
  524         map = rd->hrx_dmamap;
  525         rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
  526         sc->sc_rb.rb_spare_dmamap = map;
  527         bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
  528         HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
  529         rd->hrx_m = m;
  530         HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
  531             HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
  532         return (0);
  533 }
  534 
  535 static int
  536 hme_meminit(struct hme_softc *sc)
  537 {
  538         struct hme_ring *hr = &sc->sc_rb;
  539         struct hme_txdesc *td;
  540         bus_addr_t dma;
  541         caddr_t p;
  542         unsigned int i;
  543         int error;
  544 
  545         p = hr->rb_membase;
  546         dma = hr->rb_dmabase;
  547 
  548         /*
  549          * Allocate transmit descriptors
  550          */
  551         hr->rb_txd = p;
  552         hr->rb_txddma = dma;
  553         p += HME_NTXDESC * HME_XD_SIZE;
  554         dma += HME_NTXDESC * HME_XD_SIZE;
   555         /* We have reserved descriptor space until the next 2048 byte boundary. */
  556         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  557         p = (caddr_t)roundup((u_long)p, 2048);
  558 
  559         /*
  560          * Allocate receive descriptors
  561          */
  562         hr->rb_rxd = p;
  563         hr->rb_rxddma = dma;
  564         p += HME_NRXDESC * HME_XD_SIZE;
  565         dma += HME_NRXDESC * HME_XD_SIZE;
   566         /* Again move forward to the next 2048 byte boundary. */
  567         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  568         p = (caddr_t)roundup((u_long)p, 2048);
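
               /*
                * The control buffer is now laid out as follows, each region
                * padded up to a 2048-byte boundary:
                *
                *	rb_txddma: HME_NTXDESC TX descriptors
                *	rb_rxddma: HME_NRXDESC RX descriptors
                */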
  569 
  570         /*
  571          * Initialize transmit buffer descriptors
  572          */
  573         for (i = 0; i < HME_NTXDESC; i++) {
  574                 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
  575                 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
  576         }
  577 
  578         STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
  579         STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
  580         for (i = 0; i < HME_NTXQ; i++) {
  581                 td = &sc->sc_rb.rb_txdesc[i];
  582                 if (td->htx_m != NULL) {
  583                         m_freem(td->htx_m);
  584                         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  585                             BUS_DMASYNC_POSTWRITE);
  586                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  587                         td->htx_m = NULL;
  588                 }
  589                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
  590         }
  591 
  592         /*
  593          * Initialize receive buffer descriptors
  594          */
  595         for (i = 0; i < HME_NRXDESC; i++) {
  596                 error = hme_add_rxbuf(sc, i, 1);
  597                 if (error != 0)
  598                         return (error);
  599         }
  600 
  601         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
  602         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);
  603 
  604         hr->rb_tdhead = hr->rb_tdtail = 0;
  605         hr->rb_td_nbusy = 0;
  606         hr->rb_rdtail = 0;
  607         CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
  608             hr->rb_txddma);
  609         CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
  610             hr->rb_rxddma);
  611         CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
  612             *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
  613         CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
  614             *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
  615         return (0);
  616 }
  617 
  618 static int
  619 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
  620     u_int32_t clr, u_int32_t set)
  621 {
  622         int i = 0;
  623 
  624         val &= ~clr;
  625         val |= set;
  626         HME_MAC_WRITE_4(sc, reg, val);
  627         if (clr == 0 && set == 0)
  628                 return (1);     /* just write, no bits to wait for */
  629         do {
  630                 DELAY(100);
  631                 i++;
  632                 val = HME_MAC_READ_4(sc, reg);
  633                 if (i > 40) {
  634                         /* After 3.5ms, we should have been done. */
  635                         device_printf(sc->sc_dev, "timeout while writing to "
  636                             "MAC configuration register\n");
  637                         return (0);
  638                 }
  639         } while ((val & clr) != 0 && (val & set) != set);
  640         return (1);
  641 }
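
       /*
        * Callers use hme_mac_bitflip() to implement the disable/modify/
        * re-enable sequences the chip requires; see hme_setladrf() below,
        * which clears HME_MAC_RXCFG_ENABLE, rewrites the hash filter
        * registers and then sets the enable bits again.
        */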
  642 
  643 /*
  644  * Initialization of interface; set up initialization block
  645  * and transmit/receive descriptor rings.
  646  */
  647 static void
  648 hme_init(void *xsc)
  649 {
  650         struct hme_softc *sc = (struct hme_softc *)xsc;
  651         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  652         u_int8_t *ea;
  653         u_int32_t v;
  654 
  655         /*
  656          * Initialization sequence. The numbered steps below correspond
  657          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  658          * Channel Engine manual (part of the PCIO manual).
  659          * See also the STP2002-STQ document from Sun Microsystems.
  660          */
  661 
  662         /* step 1 & 2. Reset the Ethernet Channel */
  663         hme_stop(sc);
  664 
  665         /* Re-initialize the MIF */
  666         hme_mifinit(sc);
  667 
  668 #if 0
  669         /* Mask all MIF interrupts, just in case */
  670         HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
  671 #endif
  672 
  673         /* step 3. Setup data structures in host memory */
  674         if (hme_meminit(sc) != 0) {
   675                 device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
  676                 return;
  677         }
  678 
  679         /* step 4. TX MAC registers & counters */
  680         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  681         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  682         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  683         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  684         HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, ETHER_MAX_LEN);
  685 
  686         /* Load station MAC address */
  687         ea = sc->sc_arpcom.ac_enaddr;
  688         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
  689         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
  690         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
  691 
  692         /*
  693          * Init seed for backoff
  694          * (source suggested by manual: low 10 bits of MAC address)
  695          */
  696         v = ((ea[4] << 8) | ea[5]) & 0x3fff;
  697         HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
  698 
  699 
   700         /* Note: Accepting power-on defaults for the other MAC registers here. */
  701 
  702         /* step 5. RX MAC registers & counters */
  703         hme_setladrf(sc, 0);
  704 
  705         /* step 6 & 7. Program Descriptor Ring Base Addresses */
  706         HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
  707         /* Transmit Descriptor ring size: in increments of 16 */
  708         HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
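               /* E.g. with HME_NTXDESC = 256, this writes 256 / 16 - 1 = 15. */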
  709 
  710         HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
  711         HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, ETHER_MAX_LEN);
  712 
  713         /* step 8. Global Configuration & Interrupt Mask */
  714         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
  715             ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
  716                 HME_SEB_STAT_HOSTTOTX |
  717                 HME_SEB_STAT_RXTOHOST |
  718                 HME_SEB_STAT_TXALL |
  719                 HME_SEB_STAT_TXPERR |
  720                 HME_SEB_STAT_RCNTEXP |
  721                 HME_SEB_STAT_ALL_ERRORS ));
  722 
  723         switch (sc->sc_burst) {
  724         default:
  725                 v = 0;
  726                 break;
  727         case 16:
  728                 v = HME_SEB_CFG_BURST16;
  729                 break;
  730         case 32:
  731                 v = HME_SEB_CFG_BURST32;
  732                 break;
  733         case 64:
  734                 v = HME_SEB_CFG_BURST64;
  735                 break;
  736         }
  737         HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
  738 
  739         /* step 9. ETX Configuration: use mostly default values */
  740 
  741         /* Enable DMA */
  742         v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
  743         v |= HME_ETX_CFG_DMAENABLE;
  744         HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
  745 
  746         /* step 10. ERX Configuration */
  747         v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
  748 
  749         /* Encode Receive Descriptor ring size: four possible values */
  750         v &= ~HME_ERX_CFG_RINGSIZEMSK;
  751         switch (HME_NRXDESC) {
  752         case 32:
  753                 v |= HME_ERX_CFG_RINGSIZE32;
  754                 break;
  755         case 64:
  756                 v |= HME_ERX_CFG_RINGSIZE64;
  757                 break;
  758         case 128:
  759                 v |= HME_ERX_CFG_RINGSIZE128;
  760                 break;
  761         case 256:
  762                 v |= HME_ERX_CFG_RINGSIZE256;
  763                 break;
  764         default:
  765                 printf("hme: invalid Receive Descriptor ring size\n");
  766                 break;
  767         }
  768 
  769         /* Enable DMA, fix RX first byte offset. */
  770         v &= ~HME_ERX_CFG_FBO_MASK;
  771         v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
  772         CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
  773         HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
  774 
  775         /* step 11. XIF Configuration */
  776         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
  777         v |= HME_MAC_XIF_OE;
  778         /* If an external transceiver is connected, enable its MII drivers */
  779         if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
  780                 v |= HME_MAC_XIF_MIIENABLE;
  781         CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
  782         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
  783 
  784         /* step 12. RX_MAC Configuration Register */
  785         v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
  786         v |= HME_MAC_RXCFG_ENABLE;
  787         v &= ~(HME_MAC_RXCFG_DCRCS);
  788         CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
  789         HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
  790 
  791         /* step 13. TX_MAC Configuration Register */
  792         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
  793         v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
  794         CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
  795         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
  796 
  797         /* step 14. Issue Transmit Pending command */
  798 
  799 #ifdef HMEDEBUG
  800         /* Debug: double-check. */
  801         CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
  802             "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
  803             HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
  804             HME_ERX_READ_4(sc, HME_ERXI_RING),
  805             HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
  806         CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
  807             HME_SEB_READ_4(sc, HME_SEBI_IMASK),
  808             HME_ERX_READ_4(sc, HME_ERXI_CFG),
  809             HME_ETX_READ_4(sc, HME_ETXI_CFG));
  810         CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
  811             HME_MAC_READ_4(sc, HME_MACI_RXCFG),
  812             HME_MAC_READ_4(sc, HME_MACI_TXCFG));
  813 #endif
  814 
  815         /* Start the one second timer. */
  816         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  817 
  818         ifp->if_flags |= IFF_RUNNING;
  819         ifp->if_flags &= ~IFF_OACTIVE;
  820         ifp->if_timer = 0;
  821         hme_start(ifp);
  822 }
  823 
  824 struct hme_txdma_arg {
  825         struct hme_softc        *hta_sc;
  826         struct hme_txdesc       *hta_htx;
  827         int                     hta_ndescs;
  828 };
  829 
  830 /*
  831  * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf()
   832  * are readable from the nearest burst boundary below (i.e. potentially before)
   833  * ds_addr up to the first boundary beyond the end. This is usually a safe
  834  * assumption to make, but is not documented.
  835  */
  836 static void
  837 hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
  838     bus_size_t totsz, int error)
  839 {
  840         struct hme_txdma_arg *ta = xsc;
  841         struct hme_txdesc *htx;
  842         bus_size_t len = 0;
  843         caddr_t txd;
  844         u_int32_t flags = 0;
  845         int i, tdhead, pci;
  846 
  847         if (error != 0)
  848                 return;
  849 
  850         tdhead = ta->hta_sc->sc_rb.rb_tdhead;
  851         pci = ta->hta_sc->sc_pci;
  852         txd = ta->hta_sc->sc_rb.rb_txd;
  853         htx = ta->hta_htx;
  854 
  855         if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
  856                 ta->hta_ndescs = -1;
  857                 return;
  858         }
  859         ta->hta_ndescs = nsegs;
  860 
  861         for (i = 0; i < nsegs; i++) {
  862                 if (segs[i].ds_len == 0)
  863                         continue;
  864 
  865                 /* Fill the ring entry. */
  866                 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
  867                 if (len == 0)
  868                         flags |= HME_XD_SOP;
  869                 if (len + segs[i].ds_len == totsz)
  870                         flags |= HME_XD_EOP;
  871                 CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
  872                     "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
  873                     (u_int)segs[i].ds_addr);
  874                 HME_XD_SETFLAGS(pci, txd, tdhead, flags);
  875                 HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);
  876 
  877                 ta->hta_sc->sc_rb.rb_td_nbusy++;
  878                 htx->htx_lastdesc = tdhead;
  879                 tdhead = (tdhead + 1) % HME_NTXDESC;
  880                 len += segs[i].ds_len;
  881         }
  882         ta->hta_sc->sc_rb.rb_tdhead = tdhead;
  883         KASSERT((flags & HME_XD_EOP) != 0,
  884             ("hme_txdma_callback: missed end of packet!"));
  885 }
  886 
  887 /*
  888  * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and
  889  * start the transmission.
  890  * Returns 0 on success, -1 if there were not enough free descriptors to map
  891  * the packet, or an errno otherwise.
  892  */
  893 static int
  894 hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
  895 {
  896         struct hme_txdma_arg cba;
  897         struct hme_txdesc *td;
  898         int error, si, ri;
  899         u_int32_t flags;
  900 
  901         si = sc->sc_rb.rb_tdhead;
  902         if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
  903                 return (-1);
  904         td->htx_m = m0;
  905         cba.hta_sc = sc;
  906         cba.hta_htx = td;
  907         if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
  908              m0, hme_txdma_callback, &cba, 0)) != 0)
  909                 goto fail;
  910         if (cba.hta_ndescs == -1) {
  911                 error = -1;
  912                 goto fail;
  913         }
  914         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  915             BUS_DMASYNC_PREWRITE);
  916 
  917         STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
  918         STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);
  919 
   920         /* Hand ownership to the hme back to front; SOP's OWN bit is set last. */
  921         ri = sc->sc_rb.rb_tdhead;
  922         CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
  923             ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
  924         do {
  925                 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
  926                 flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
  927                     HME_XD_OWN;
  928                 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
  929                     ri, si, flags);
  930                 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
  931         } while (ri != si);
  932 
  933         /* start the transmission. */
  934         HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
  935         return (0);
  936 fail:
  937         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  938         return (error);
  939 }
  940 
  941 /*
  942  * Pass a packet to the higher levels.
  943  */
  944 static void
  945 hme_read(struct hme_softc *sc, int ix, int len)
  946 {
  947         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  948         struct mbuf *m;
  949 
  950         if (len <= sizeof(struct ether_header) ||
  951             len > ETHERMTU + sizeof(struct ether_header)) {
  952 #ifdef HMEDEBUG
  953                 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
  954                     len);
  955 #endif
  956                 ifp->if_ierrors++;
  957                 hme_discard_rxbuf(sc, ix);
  958                 return;
  959         }
  960 
  961         m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
  962         CTR1(KTR_HME, "hme_read: len %d", len);
  963 
  964         if (hme_add_rxbuf(sc, ix, 0) != 0) {
  965                 /*
  966                  * hme_add_rxbuf will leave the old buffer in the ring until
  967                  * it is sure that a new buffer can be mapped. If it can not,
  968                  * drop the packet, but leave the interface up.
  969                  */
  970                 ifp->if_iqdrops++;
  971                 hme_discard_rxbuf(sc, ix);
  972                 return;
  973         }
  974 
  975         ifp->if_ipackets++;
  976 
  977         m->m_pkthdr.rcvif = ifp;
  978         m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
  979         m_adj(m, HME_RXOFFS);
  980         /* Pass the packet up. */
  981         (*ifp->if_input)(ifp, m);
  982 }
  983 
  984 static void
  985 hme_start(struct ifnet *ifp)
  986 {
  987         struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
  988         struct mbuf *m;
  989         int error, enq = 0;
  990 
  991         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
  992                 return;
  993 
  994         error = 0;
  995         for (;;) {
  996                 IF_DEQUEUE(&ifp->if_snd, m);
  997                 if (m == NULL)
  998                         break;
  999 
 1000                 error = hme_load_txmbuf(sc, m);
 1001                 if (error == -1) {
 1002                         ifp->if_flags |= IFF_OACTIVE;
 1003                         IF_PREPEND(&ifp->if_snd, m);
 1004                         break;
 1005                 } else if (error > 0) {
 1006                         printf("hme_start: error %d while loading mbuf\n",
 1007                             error);
 1008                 } else {
 1009                         enq = 1;
 1010                         BPF_MTAP(ifp, m);
 1011                 }
 1012         }
 1013 
 1014         if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
 1015                 ifp->if_flags |= IFF_OACTIVE;
 1016         /* Set watchdog timer if a packet was queued */
 1017         if (enq) {
 1018                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1019                     BUS_DMASYNC_PREWRITE);
 1020                 ifp->if_timer = 5;
 1021         }
 1022 }
 1023 
 1024 /*
 1025  * Transmit interrupt.
 1026  */
 1027 static void
 1028 hme_tint(struct hme_softc *sc)
 1029 {
 1030         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1031         struct hme_txdesc *htx;
 1032         unsigned int ri, txflags;
 1033 
 1034         /*
 1035          * Unload collision counters
 1036          */
 1037         ifp->if_collisions +=
 1038                 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
 1039                 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
 1040                 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
 1041                 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
 1042 
 1043         /*
 1044          * then clear the hardware counters.
 1045          */
 1046         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
 1047         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
 1048         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
 1049         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
 1050 
 1051         htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1052         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1053         /* Fetch current position in the transmit ring */
 1054         for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
 1055                 if (sc->sc_rb.rb_td_nbusy <= 0) {
 1056                         CTR0(KTR_HME, "hme_tint: not busy!");
 1057                         break;
 1058                 }
 1059 
 1060                 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
 1061                 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
 1062 
 1063                 if ((txflags & HME_XD_OWN) != 0)
 1064                         break;
 1065 
 1066                 CTR0(KTR_HME, "hme_tint: not owned");
 1067                 --sc->sc_rb.rb_td_nbusy;
 1068                 ifp->if_flags &= ~IFF_OACTIVE;
 1069 
 1070                 /* Complete packet transmitted? */
 1071                 if ((txflags & HME_XD_EOP) == 0)
 1072                         continue;
 1073 
 1074                 KASSERT(htx->htx_lastdesc == ri,
 1075                     ("hme_tint: ring indices skewed: %d != %d!",
 1076                      htx->htx_lastdesc, ri));
 1077                 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
 1078                     BUS_DMASYNC_POSTWRITE);
 1079                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1080 
 1081                 ifp->if_opackets++;
 1082                 m_freem(htx->htx_m);
 1083                 htx->htx_m = NULL;
 1084                 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
 1085                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
 1086                 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1087         }
 1088         /* Turn off watchdog */
 1089         if (sc->sc_rb.rb_td_nbusy == 0)
 1090                 ifp->if_timer = 0;
 1091 
 1092         /* Update ring */
 1093         sc->sc_rb.rb_tdtail = ri;
 1094 
 1095         hme_start(ifp);
 1096 
 1097         if (sc->sc_rb.rb_td_nbusy == 0)
 1098                 ifp->if_timer = 0;
 1099 }
 1100 
 1101 /*
 1102  * Receive interrupt.
 1103  */
 1104 static void
 1105 hme_rint(struct hme_softc *sc)
 1106 {
 1107         caddr_t xdr = sc->sc_rb.rb_rxd;
 1108         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1109         unsigned int ri, len;
 1110         int progress = 0;
 1111         u_int32_t flags;
 1112 
 1113         /*
 1114          * Process all buffers with valid data.
 1115          */
 1116         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1117         for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
 1118                 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
 1119                 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
 1120                 if ((flags & HME_XD_OWN) != 0)
 1121                         break;
 1122 
 1123                 progress++;
 1124                 if ((flags & HME_XD_OFL) != 0) {
 1125                         device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
 1126                             "flags=0x%x\n", ri, flags);
 1127                         ifp->if_ierrors++;
 1128                         hme_discard_rxbuf(sc, ri);
 1129                 } else {
 1130                         len = HME_XD_DECODE_RSIZE(flags);
 1131                         hme_read(sc, ri, len);
 1132                 }
 1133         }
 1134         if (progress) {
 1135                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1136                     BUS_DMASYNC_PREWRITE);
 1137         }
 1138         sc->sc_rb.rb_rdtail = ri;
 1139 }
 1140 
 1141 static void
 1142 hme_eint(struct hme_softc *sc, u_int status)
 1143 {
 1144 
 1145         if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
 1146                 device_printf(sc->sc_dev, "XXXlink status changed\n");
 1147                 return;
 1148         }
 1149 
 1150         HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 1151 }
 1152 
 1153 void
 1154 hme_intr(void *v)
 1155 {
 1156         struct hme_softc *sc = (struct hme_softc *)v;
 1157         u_int32_t status;
 1158 
 1159         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1160         CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 1161 
 1162         if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 1163                 hme_eint(sc, status);
 1164 
 1165         if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 1166                 hme_tint(sc);
 1167 
 1168         if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 1169                 hme_rint(sc);
 1170 }
 1171 
 1172 
 1173 static void
 1174 hme_watchdog(struct ifnet *ifp)
 1175 {
 1176         struct hme_softc *sc = ifp->if_softc;
 1177 #ifdef HMEDEBUG
 1178         u_int32_t status;
 1179 
 1180         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1181         CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
 1182 #endif
 1183         device_printf(sc->sc_dev, "device timeout\n");
 1184         ++ifp->if_oerrors;
 1185 
 1186         hme_reset(sc);
 1187 }
 1188 
 1189 /*
 1190  * Initialize the MII Management Interface
 1191  */
 1192 static void
 1193 hme_mifinit(struct hme_softc *sc)
 1194 {
 1195         u_int32_t v;
 1196 
 1197         /* Configure the MIF in frame mode */
 1198         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1199         v &= ~HME_MIF_CFG_BBMODE;
 1200         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1201 }
 1202 
 1203 /*
 1204  * MII interface
 1205  */
 1206 int
 1207 hme_mii_readreg(device_t dev, int phy, int reg)
 1208 {
 1209         struct hme_softc *sc = device_get_softc(dev);
 1210         int n;
 1211         u_int32_t v;
 1212 
 1213         /* Select the desired PHY in the MIF configuration register */
 1214         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1215         /* Clear PHY select bit */
 1216         v &= ~HME_MIF_CFG_PHY;
 1217         if (phy == HME_PHYAD_EXTERNAL)
 1218                 /* Set PHY select bit to get at external device */
 1219                 v |= HME_MIF_CFG_PHY;
 1220         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1221 
 1222         /* Construct the frame command */
 1223         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 1224             HME_MIF_FO_TAMSB |
 1225             (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 1226             (phy << HME_MIF_FO_PHYAD_SHIFT) |
 1227             (reg << HME_MIF_FO_REGAD_SHIFT);
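
               /*
                * This follows the standard IEEE 802.3 clause 22 MDIO read
                * frame layout: start sequence, read opcode, 5-bit PHY
                * address and 5-bit register address. The chip indicates
                * completion by setting HME_MIF_FO_TALSB, after which the
                * result is in HME_MIF_FO_DATA.
                */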
 1228 
 1229         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1230         for (n = 0; n < 100; n++) {
 1231                 DELAY(1);
 1232                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1233                 if (v & HME_MIF_FO_TALSB)
 1234                         return (v & HME_MIF_FO_DATA);
 1235         }
 1236 
 1237         device_printf(sc->sc_dev, "mii_read timeout\n");
 1238         return (0);
 1239 }
 1240 
 1241 int
 1242 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 1243 {
 1244         struct hme_softc *sc = device_get_softc(dev);
 1245         int n;
 1246         u_int32_t v;
 1247 
 1248         /* Select the desired PHY in the MIF configuration register */
 1249         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1250         /* Clear PHY select bit */
 1251         v &= ~HME_MIF_CFG_PHY;
 1252         if (phy == HME_PHYAD_EXTERNAL)
 1253                 /* Set PHY select bit to get at external device */
 1254                 v |= HME_MIF_CFG_PHY;
 1255         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1256 
 1257         /* Construct the frame command */
 1258         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)  |
 1259             HME_MIF_FO_TAMSB                            |
 1260             (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 1261             (phy << HME_MIF_FO_PHYAD_SHIFT)             |
 1262             (reg << HME_MIF_FO_REGAD_SHIFT)             |
 1263             (val & HME_MIF_FO_DATA);
 1264 
 1265         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1266         for (n = 0; n < 100; n++) {
 1267                 DELAY(1);
 1268                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1269                 if (v & HME_MIF_FO_TALSB)
 1270                         return (1);
 1271         }
 1272 
 1273         device_printf(sc->sc_dev, "mii_write timeout\n");
 1274         return (0);
 1275 }
 1276 
 1277 void
 1278 hme_mii_statchg(device_t dev)
 1279 {
 1280         struct hme_softc *sc = device_get_softc(dev);
 1281         int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
 1282         int phy = sc->sc_phys[instance];
 1283         u_int32_t v;
 1284 
 1285 #ifdef HMEDEBUG
 1286         if (sc->sc_debug)
 1287                 printf("hme_mii_statchg: status change: phy = %d\n", phy);
 1288 #endif
 1289 
 1290         /* Select the current PHY in the MIF configuration register */
 1291         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1292         v &= ~HME_MIF_CFG_PHY;
 1293         if (phy == HME_PHYAD_EXTERNAL)
 1294                 v |= HME_MIF_CFG_PHY;
 1295         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1296 
 1297         /* Set the MAC Full Duplex bit appropriately */
 1298         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 1299         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
 1300                 return;
 1301         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 1302                 v |= HME_MAC_TXCFG_FULLDPLX;
 1303         else
 1304                 v &= ~HME_MAC_TXCFG_FULLDPLX;
 1305         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
 1306         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
 1307                 return;
 1308 }
 1309 
 1310 static int
 1311 hme_mediachange(struct ifnet *ifp)
 1312 {
 1313         struct hme_softc *sc = ifp->if_softc;
 1314 
 1315         return (mii_mediachg(sc->sc_mii));
 1316 }
 1317 
 1318 static void
 1319 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1320 {
 1321         struct hme_softc *sc = ifp->if_softc;
 1322 
 1323         if ((ifp->if_flags & IFF_UP) == 0)
 1324                 return;
 1325 
 1326         mii_pollstat(sc->sc_mii);
 1327         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 1328         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 1329 }
 1330 
 1331 /*
 1332  * Process an ioctl request.
 1333  */
 1334 static int
 1335 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1336 {
 1337         struct hme_softc *sc = ifp->if_softc;
 1338         struct ifreq *ifr = (struct ifreq *)data;
 1339         int s, error = 0;
 1340 
 1341         s = splnet();
 1342 
 1343         switch (cmd) {
 1344         case SIOCSIFFLAGS:
 1345                 if ((ifp->if_flags & IFF_UP) == 0 &&
 1346                     (ifp->if_flags & IFF_RUNNING) != 0) {
 1347                         /*
 1348                          * If interface is marked down and it is running, then
 1349                          * stop it.
 1350                          */
 1351                         hme_stop(sc);
 1352                         ifp->if_flags &= ~IFF_RUNNING;
 1353                 } else if ((ifp->if_flags & IFF_UP) != 0 &&
 1354                            (ifp->if_flags & IFF_RUNNING) == 0) {
 1355                         /*
 1356                          * If interface is marked up and it is stopped, then
 1357                          * start it.
 1358                          */
 1359                         hme_init(sc);
 1360                 } else if ((ifp->if_flags & IFF_UP) != 0) {
 1361                         /*
 1362                          * Reset the interface to pick up changes in any other
 1363                          * flags that affect hardware registers.
 1364                          */
 1365                         hme_init(sc);
 1366                 }
 1367 #ifdef HMEDEBUG
 1368                 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
 1369 #endif
 1370                 break;
 1371 
 1372         case SIOCADDMULTI:
 1373         case SIOCDELMULTI:
 1374                 hme_setladrf(sc, 1);
 1375                 error = 0;
 1376                 break;
 1377         case SIOCGIFMEDIA:
 1378         case SIOCSIFMEDIA:
 1379                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 1380                 break;
 1381         default:
 1382                 error = ether_ioctl(ifp, cmd, data);
 1383                 break;
 1384         }
 1385 
 1386         splx(s);
 1387         return (error);
 1388 }
 1389 
 1390 /*
 1391  * Set up the logical address filter.
 1392  */
 1393 static void
 1394 hme_setladrf(struct hme_softc *sc, int reenable)
 1395 {
 1396         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1397         struct ifmultiaddr *inm;
 1398         struct sockaddr_dl *sdl;
 1399         u_char *cp;
 1400         u_int32_t crc;
 1401         u_int32_t hash[4];
 1402         u_int32_t macc;
 1403         int len;
 1404 
 1405         /* Clear hash table */
 1406         hash[3] = hash[2] = hash[1] = hash[0] = 0;
 1407 
 1408         /* Get current RX configuration */
 1409         macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1410 
 1411         /*
  1412          * Disable the receiver while changing its state, as the documentation
 1413          * mandates.
 1414          * We then must wait until the bit clears in the register. This should
 1415          * take at most 3.5ms.
 1416          */
 1417         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
 1418                 return;
 1419         /* Disable the hash filter before writing to the filter registers. */
 1420         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1421             HME_MAC_RXCFG_HENABLE, 0))
 1422                 return;
 1423 
 1424         if (reenable)
 1425                 macc |= HME_MAC_RXCFG_ENABLE;
 1426         else
 1427                 macc &= ~HME_MAC_RXCFG_ENABLE;
 1428 
 1429         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 1430                 /* Turn on promiscuous mode; turn off the hash filter */
 1431                 macc |= HME_MAC_RXCFG_PMISC;
 1432                 macc &= ~HME_MAC_RXCFG_HENABLE;
 1433                 ifp->if_flags |= IFF_ALLMULTI;
 1434                 goto chipit;
 1435         }
 1436 
 1437         /* Turn off promiscuous mode; turn on the hash filter */
 1438         macc &= ~HME_MAC_RXCFG_PMISC;
 1439         macc |= HME_MAC_RXCFG_HENABLE;
 1440 
 1441         /*
 1442          * Set up multicast address filter by passing all multicast addresses
 1443          * through a crc generator, and then using the high order 6 bits as an
  1444          * index into the 64 bit logical address filter.  The two high order
  1445          * bits select the word, while the remaining bits select the bit within
  1446          * the word.
 1447          */
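               /*
                * Worked example: after crc >>= 26 below, crc is a 6-bit
                * index; crc >> 4 (its two high order bits) picks one of the
                * four 16-bit hash words and crc & 0xf picks the bit, so
                * crc = 0x2a sets bit 10 in hash[2].
                */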
 1448 
 1449         TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
 1450                 if (inm->ifma_addr->sa_family != AF_LINK)
 1451                         continue;
 1452                 sdl = (struct sockaddr_dl *)inm->ifma_addr;
 1453                 cp = LLADDR(sdl);
 1454                 crc = 0xffffffff;
 1455                 for (len = sdl->sdl_alen; --len >= 0;) {
 1456                         int octet = *cp++;
 1457                         int i;
 1458 
 1459 #define MC_POLY_LE      0xedb88320UL    /* mcast crc, little endian */
 1460                         for (i = 0; i < 8; i++) {
 1461                                 if ((crc & 1) ^ (octet & 1)) {
 1462                                         crc >>= 1;
 1463                                         crc ^= MC_POLY_LE;
 1464                                 } else {
 1465                                         crc >>= 1;
 1466                                 }
 1467                                 octet >>= 1;
 1468                         }
 1469                 }
 1470                 /* Just want the 6 most significant bits. */
 1471                 crc >>= 26;
 1472 
 1473                 /* Set the corresponding bit in the filter. */
 1474                 hash[crc >> 4] |= 1 << (crc & 0xf);
 1475         }
 1476 
 1477         ifp->if_flags &= ~IFF_ALLMULTI;
 1478 
 1479 chipit:
 1480         /* Now load the hash table into the chip */
 1481         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 1482         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 1483         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 1484         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 1485         hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 1486             macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
 1487 }
