FreeBSD/Linux Kernel Cross Reference
sys/dev/hme/if_hme.c


    1 /*-
    2  * Copyright (c) 1999 The NetBSD Foundation, Inc.
    3  * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software contributed to The NetBSD Foundation
    7  * by Paul Kranenburg.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *        This product includes software developed by the NetBSD
   20  *        Foundation, Inc. and its contributors.
   21  * 4. Neither the name of The NetBSD Foundation nor the names of its
   22  *    contributors may be used to endorse or promote products derived
   23  *    from this software without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  *
   37  *      from: NetBSD: hme.c,v 1.29 2002/05/05 03:02:38 thorpej Exp
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD: releng/5.3/sys/dev/hme/if_hme.c 133688 2004-08-13 23:14:50Z rwatson $");
   42 
   43 /*
   44  * HME Ethernet module driver.
   45  *
   46  * The HME is e.g. part of the PCIO PCI multi function device.
   47  * It supports TX gathering and TX and RX checksum offloading.
   48  * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
   49  * for this offset: mbuf clusters are usually aligned to 2^11 byte boundaries,
   50  * and 2 bytes are skipped so that the header following the Ethernet header is
   51  * aligned on a natural boundary, ensuring minimal wastage in the common case.
   52  *
   53  * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
   54  * maximum packet size (this is not verified). Buffers starting on odd
   55  * boundaries must be mapped so that the burst can start on a natural boundary.
   56  *
   57  * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
   58  * offloading. In practice the same technique works for UDP datagrams as well,
   59  * but the hardware does not compensate a UDP checksum that computes to 0x0000,
   60  * so UDP checksum offload is disabled by default as a safeguard. It can be
   61  * reactivated by setting the special link option link0 with ifconfig(8).
   62  */
   63 #define HME_CSUM_FEATURES       (CSUM_TCP)
   64 #define HMEDEBUG
   65 #define KTR_HME         KTR_CT2         /* XXX */
   66 
   67 #include <sys/param.h>
   68 #include <sys/systm.h>
   69 #include <sys/bus.h>
   70 #include <sys/endian.h>
   71 #include <sys/kernel.h>
   72 #include <sys/module.h>
   73 #include <sys/ktr.h>
   74 #include <sys/mbuf.h>
   75 #include <sys/malloc.h>
   76 #include <sys/socket.h>
   77 #include <sys/sockio.h>
   78 
   79 #include <net/bpf.h>
   80 #include <net/ethernet.h>
   81 #include <net/if.h>
   82 #include <net/if_arp.h>
   83 #include <net/if_dl.h>
   84 #include <net/if_media.h>
   85 #include <net/if_vlan_var.h>
   86 
   87 #include <netinet/in.h>
   88 #include <netinet/in_systm.h>
   89 #include <netinet/ip.h>
   90 #include <netinet/tcp.h>
   91 #include <netinet/udp.h>
   92 
   93 #include <dev/mii/mii.h>
   94 #include <dev/mii/miivar.h>
   95 
   96 #include <machine/bus.h>
   97 
   98 #include <dev/hme/if_hmereg.h>
   99 #include <dev/hme/if_hmevar.h>
  100 
  101 static void     hme_start(struct ifnet *);
  102 static void     hme_stop(struct hme_softc *);
  103 static int      hme_ioctl(struct ifnet *, u_long, caddr_t);
  104 static void     hme_tick(void *);
  105 static void     hme_watchdog(struct ifnet *);
  106 static void     hme_init(void *);
  107 static int      hme_add_rxbuf(struct hme_softc *, unsigned int, int);
  108 static int      hme_meminit(struct hme_softc *);
  109 static int      hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
  110     u_int32_t, u_int32_t);
  111 static void     hme_mifinit(struct hme_softc *);
  112 static void     hme_reset(struct hme_softc *);
  113 static void     hme_setladrf(struct hme_softc *, int);
  114 
  115 static int      hme_mediachange(struct ifnet *);
  116 static void     hme_mediastatus(struct ifnet *, struct ifmediareq *);
  117 
  118 static int      hme_load_txmbuf(struct hme_softc *, struct mbuf *);
  119 static void     hme_read(struct hme_softc *, int, int, u_int32_t);
  120 static void     hme_eint(struct hme_softc *, u_int);
  121 static void     hme_rint(struct hme_softc *);
  122 static void     hme_tint(struct hme_softc *);
  123 static void     hme_txcksum(struct mbuf *, u_int32_t *);
  124 static void     hme_rxcksum(struct mbuf *, u_int32_t);
  125 
  126 static void     hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
  127 static void     hme_rxdma_callback(void *, bus_dma_segment_t *, int,
  128     bus_size_t, int);
  129 static void     hme_txdma_callback(void *, bus_dma_segment_t *, int,
  130     bus_size_t, int);
  131 
  132 devclass_t hme_devclass;
  133 
  134 static int hme_nerr;
  135 
  136 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
  137 MODULE_DEPEND(hme, miibus, 1, 1, 1);
  138 
  139 #define HME_SPC_READ_4(spc, sc, offs) \
  140         bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  141             (offs))
  142 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
  143         bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  144             (offs), (v))
  145 
  146 #define HME_SEB_READ_4(sc, offs)        HME_SPC_READ_4(seb, (sc), (offs))
  147 #define HME_SEB_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(seb, (sc), (offs), (v))
  148 #define HME_ERX_READ_4(sc, offs)        HME_SPC_READ_4(erx, (sc), (offs))
  149 #define HME_ERX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(erx, (sc), (offs), (v))
  150 #define HME_ETX_READ_4(sc, offs)        HME_SPC_READ_4(etx, (sc), (offs))
  151 #define HME_ETX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(etx, (sc), (offs), (v))
  152 #define HME_MAC_READ_4(sc, offs)        HME_SPC_READ_4(mac, (sc), (offs))
  153 #define HME_MAC_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mac, (sc), (offs), (v))
  154 #define HME_MIF_READ_4(sc, offs)        HME_SPC_READ_4(mif, (sc), (offs))
  155 #define HME_MIF_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mif, (sc), (offs), (v))
  156 
  157 #define HME_MAXERR      5
  158 #define HME_WHINE(dev, ...) do {                                        \
  159         if (hme_nerr++ < HME_MAXERR)                                    \
  160                 device_printf(dev, __VA_ARGS__);                        \
  161         if (hme_nerr == HME_MAXERR) {                                   \
  162                 device_printf(dev, "too many errors; not reporting any " \
  163                     "more\n");                                          \
  164         }                                                               \
  165 } while(0)
  166 
  167 /* Support oversized VLAN frames. */
  168 #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
  169 
  170 int
  171 hme_config(struct hme_softc *sc)
  172 {
  173         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  174         struct mii_softc *child;
  175         bus_size_t size;
  176         int error, rdesc, tdesc, i;
  177 
  178         /*
  179          * HME common initialization.
  180          *
  181          * hme_softc fields that must be initialized by the front-end:
  182          *
  183          * the DMA bus tag:
  184          *      sc_dmatag
  185          *
  186          * the bus handles, tags and offsets (split for SBus compatibility):
  187          *      sc_seb{t,h,o}   (Shared Ethernet Block registers)
  188          *      sc_erx{t,h,o}   (Receiver Unit registers)
  189          *      sc_etx{t,h,o}   (Transmitter Unit registers)
  190          *      sc_mac{t,h,o}   (MAC registers)
  191          *      sc_mif{t,h,o}   (Management Interface registers)
  192          *
  193          * the maximum bus burst size:
  194          *      sc_burst
  195          *
  196          */
  197 
  198         /* Make sure the chip is stopped. */
  199         hme_stop(sc);
  200 
  201         /*
  202          * Allocate DMA capable memory
  203          * Buffer descriptors must be aligned on a 2048 byte boundary;
  204          * take this into account when calculating the size. Note that
  205          * the maximum number of descriptors (256) occupies 2048 bytes,
  206          * so we allocate that much regardless of HME_N*DESC.
  207          */
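              /*
               * The 4096 bytes provide one 2048 byte aligned region for the TX
               * descriptor ring and one for the RX descriptor ring; hme_meminit()
               * carves the block up along those 2048 byte boundaries.
               */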
  208         size =  4096;
  209 
  210         error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
  211             BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
  212             BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
  213         if (error)
  214                 return (error);
  215 
  216         error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
  217             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
  218             1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
  219             &Giant, &sc->sc_cdmatag);
  220         if (error)
  221                 goto fail_ptag;
  222 
  223         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  224             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  225             HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
  226             NULL, NULL, &sc->sc_rdmatag);
  227         if (error)
  228                 goto fail_ctag;
  229 
  230         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  231             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  232             HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
  233             NULL, NULL, &sc->sc_tdmatag);
  234         if (error)
  235                 goto fail_rtag;
  236 
  237         /* Allocate control/TX DMA buffer */
  238         error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
  239             0, &sc->sc_cdmamap);
  240         if (error != 0) {
  241                 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
  242                 goto fail_ttag;
  243         }
  244 
  245         /* Load the buffer */
  246         sc->sc_rb.rb_dmabase = 0;
  247         if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
  248              sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
  249             sc->sc_rb.rb_dmabase == 0) {
  250                 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
  251                     error);
  252                 goto fail_free;
  253         }
  254         CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
  255             sc->sc_rb.rb_dmabase);
  256 
  257         /*
  258          * Prepare the RX descriptors. rdesc serves as marker for the last
  259          * processed descriptor and may be used later on.
  260          */
  261         for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
  262                 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
  263                 error = bus_dmamap_create(sc->sc_rdmatag, 0,
  264                     &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
  265                 if (error != 0)
  266                         goto fail_rxdesc;
  267         }
  268         error = bus_dmamap_create(sc->sc_rdmatag, 0,
  269             &sc->sc_rb.rb_spare_dmamap);
  270         if (error != 0)
  271                 goto fail_rxdesc;
  272         /* Same for the TX descs. */
  273         for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
  274                 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
  275                 error = bus_dmamap_create(sc->sc_tdmatag, 0,
  276                     &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
  277                 if (error != 0)
  278                         goto fail_txdesc;
  279         }
  280 
  281         sc->sc_csum_features = HME_CSUM_FEATURES;
  282         /* Initialize ifnet structure. */
  283         ifp->if_softc = sc;
  284         if_initname(ifp, device_get_name(sc->sc_dev),
  285             device_get_unit(sc->sc_dev));
  286         ifp->if_mtu = ETHERMTU;
  287         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
  288             IFF_NEEDSGIANT;
  289         ifp->if_start = hme_start;
  290         ifp->if_ioctl = hme_ioctl;
  291         ifp->if_init = hme_init;
  292         ifp->if_watchdog = hme_watchdog;
  293         IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
  294         ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
  295         IFQ_SET_READY(&ifp->if_snd);
  296 
  297         hme_mifinit(sc);
  298 
  299         if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
  300             hme_mediastatus)) != 0) {
  301                 device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
  302                 goto fail_rxdesc;
  303         }
  304         sc->sc_mii = device_get_softc(sc->sc_miibus);
  305 
  306         /*
  307          * Walk along the list of attached MII devices and
  308          * establish an `MII instance' to `phy number'
  309          * mapping. We'll use this mapping in media change
  310          * requests to determine which phy to use to program
  311          * the MIF configuration register.
  312          */
  313         for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
  314              child = LIST_NEXT(child, mii_list)) {
  315                 /*
  316                  * Note: we support just two PHYs: the built-in
  317                  * internal device and an external on the MII
  318                  * connector.
  319                  */
  320                 if (child->mii_phy > 1 || child->mii_inst > 1) {
  321                         device_printf(sc->sc_dev, "cannot accommodate "
  322                             "MII device %s at phy %d, instance %d\n",
  323                             device_get_name(child->mii_dev),
  324                             child->mii_phy, child->mii_inst);
  325                         continue;
  326                 }
  327 
  328                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  329         }
  330 
  331         /* Attach the interface. */
  332         ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);
  333 
  334         /*
  335          * Tell the upper layer(s) we support long frames/checksum offloads.
  336          */
  337         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  338         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  339         ifp->if_hwassist |= sc->sc_csum_features;
  340         ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  341 
  342         callout_init(&sc->sc_tick_ch, 0);
  343         return (0);
  344 
  345 fail_txdesc:
  346         for (i = 0; i < tdesc; i++) {
  347                 bus_dmamap_destroy(sc->sc_tdmatag,
  348                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  349         }
  350         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  351 fail_rxdesc:
  352         for (i = 0; i < rdesc; i++) {
  353                 bus_dmamap_destroy(sc->sc_rdmatag,
  354                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  355         }
  356         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  357 fail_free:
  358         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  359 fail_ttag:
  360         bus_dma_tag_destroy(sc->sc_tdmatag);
  361 fail_rtag:
  362         bus_dma_tag_destroy(sc->sc_rdmatag);
  363 fail_ctag:
  364         bus_dma_tag_destroy(sc->sc_cdmatag);
  365 fail_ptag:
  366         bus_dma_tag_destroy(sc->sc_pdmatag);
  367         return (error);
  368 }
  369 
  370 void
  371 hme_detach(struct hme_softc *sc)
  372 {
  373         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  374         int i;
  375 
  376         ether_ifdetach(ifp);
  377         hme_stop(sc);
  378         device_delete_child(sc->sc_dev, sc->sc_miibus);
  379 
  380         for (i = 0; i < HME_NTXQ; i++) {
  381                 bus_dmamap_destroy(sc->sc_tdmatag,
  382                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  383         }
  384         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  385         for (i = 0; i < HME_NRXDESC; i++) {
  386                 bus_dmamap_destroy(sc->sc_rdmatag,
  387                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  388         }
  389         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
  390         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
  391         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  392         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  393         bus_dma_tag_destroy(sc->sc_tdmatag);
  394         bus_dma_tag_destroy(sc->sc_rdmatag);
  395         bus_dma_tag_destroy(sc->sc_cdmatag);
  396         bus_dma_tag_destroy(sc->sc_pdmatag);
  397 }
  398 
  399 void
  400 hme_suspend(struct hme_softc *sc)
  401 {
  402 
  403         hme_stop(sc);
  404 }
  405 
  406 void
  407 hme_resume(struct hme_softc *sc)
  408 {
  409         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  410 
  411         if ((ifp->if_flags & IFF_UP) != 0)
  412                 hme_init(ifp);
  413 }
  414 
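      /*
       * Callback for loading the control/descriptor DMA block; it simply
       * records the bus address of the single segment in rb_dmabase.
       */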
  415 static void
  416 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  417 {
  418         struct hme_softc *sc = (struct hme_softc *)xsc;
  419 
  420         if (error != 0)
  421                 return;
  422         KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
  423         sc->sc_rb.rb_dmabase = segs[0].ds_addr;
  424 }
  425 
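      /*
       * Once-a-second timer: poll the PHY via mii_tick() and reschedule.
       */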
  426 static void
  427 hme_tick(void *arg)
  428 {
  429         struct hme_softc *sc = arg;
  430         int s;
  431 
  432         s = splnet();
  433         mii_tick(sc->sc_mii);
  434         splx(s);
  435 
  436         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  437 }
  438 
  439 static void
  440 hme_reset(struct hme_softc *sc)
  441 {
  442         int s;
  443 
  444         s = splnet();
  445         hme_init(sc);
  446         splx(s);
  447 }
  448 
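      /*
       * Stop the chip: cancel the tick callout and reset the transmitter and
       * receiver, polling briefly for the reset bits to clear.
       */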
  449 static void
  450 hme_stop(struct hme_softc *sc)
  451 {
  452         u_int32_t v;
  453         int n;
  454 
  455         callout_stop(&sc->sc_tick_ch);
  456 
  457         /* Reset transmitter and receiver */
  458         HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
  459             HME_SEB_RESET_ERX);
  460 
  461         for (n = 0; n < 20; n++) {
  462                 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
  463                 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
  464                         return;
  465                 DELAY(20);
  466         }
  467 
  468         device_printf(sc->sc_dev, "hme_stop: reset failed\n");
  469 }
  470 
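      /*
       * Callback for loading an RX mbuf; stores the bus address of the single
       * segment in the caller-supplied variable.
       */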
  471 static void
  472 hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
  473     bus_size_t totsize, int error)
  474 {
  475         bus_addr_t *a = xsc;
  476 
  477         KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
  478         if (error != 0)
  479                 return;
  480         *a = segs[0].ds_addr;
  481 }
  482 
  483 /*
  484  * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
  485  * ring for subsequent use.
  486  */
  487 static __inline void
  488 hme_discard_rxbuf(struct hme_softc *sc, int ix)
  489 {
  490 
  491         /*
  492          * Dropped a packet, reinitialize the descriptor and turn the
  493          * ownership back to the hardware.
  494          */
  495         HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
  496             HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
  497 }
  498 
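      /*
       * (Re)load RX descriptor `ri' with a fresh mbuf cluster or, if `keepold'
       * is set and a buffer is already present, simply hand the existing buffer
       * back to the hardware.  A new cluster is mapped through the spare DMA map
       * first, so the old buffer is only torn down once the new mapping is known
       * to have succeeded.
       */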
  499 static int
  500 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
  501 {
  502         struct hme_rxdesc *rd;
  503         struct mbuf *m;
  504         bus_addr_t ba;
  505         bus_dmamap_t map;
  506         uintptr_t b;
  507         int a, unmap;
  508 
  509         rd = &sc->sc_rb.rb_rxdesc[ri];
  510         unmap = rd->hrx_m != NULL;
  511         if (unmap && keepold) {
  512                 /*
  513                  * Reinitialize the descriptor flags, as they may have been
  514                  * altered by the hardware.
  515                  */
  516                 hme_discard_rxbuf(sc, ri);
  517                 return (0);
  518         }
  519         if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
  520                 return (ENOBUFS);
  521         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
  522         b = mtod(m, uintptr_t);
  523         /*
  524          * Required alignment boundary. At least 16 is needed, but since
  525          * the mapping must be done in a way that a burst can start on a
  526          * natural boundary we might need to extend this.
  527          */
  528         a = max(HME_MINRXALIGN, sc->sc_burst);
  529         /*
  530          * Make sure the buffer is suitably aligned. The 2 byte offset is removed
  531          * when the mbuf is handed up. XXX: this ensures at least 16 byte
  532          * alignment of the header adjacent to the ethernet header, which
  533          * should be sufficient in all cases. Nevertheless, this second-guesses
  534          * ALIGN().
  535          */
  536         m_adj(m, roundup2(b, a) - b);
  537         if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
  538             m, hme_rxdma_callback, &ba, 0) != 0) {
  539                 m_freem(m);
  540                 return (ENOBUFS);
  541         }
  542         if (unmap) {
  543                 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
  544                     BUS_DMASYNC_POSTREAD);
  545                 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
  546         }
  547         map = rd->hrx_dmamap;
  548         rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
  549         sc->sc_rb.rb_spare_dmamap = map;
  550         bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
  551         HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
  552         rd->hrx_m = m;
  553         HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
  554             HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
  555         return (0);
  556 }
  557 
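      /*
       * Lay out the previously allocated DMA block: the TX descriptor ring,
       * followed (at the next 2048 byte boundary) by the RX descriptor ring.
       * All descriptors and the software ring state are (re)initialized.
       */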
  558 static int
  559 hme_meminit(struct hme_softc *sc)
  560 {
  561         struct hme_ring *hr = &sc->sc_rb;
  562         struct hme_txdesc *td;
  563         bus_addr_t dma;
  564         caddr_t p;
  565         unsigned int i;
  566         int error;
  567 
  568         p = hr->rb_membase;
  569         dma = hr->rb_dmabase;
  570 
  571         /*
  572          * Allocate transmit descriptors
  573          */
  574         hr->rb_txd = p;
  575         hr->rb_txddma = dma;
  576         p += HME_NTXDESC * HME_XD_SIZE;
  577         dma += HME_NTXDESC * HME_XD_SIZE;
  578         /* We have reserved descriptor space until the next 2048 byte boundary. */
  579         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  580         p = (caddr_t)roundup((u_long)p, 2048);
  581 
  582         /*
  583          * Allocate receive descriptors
  584          */
  585         hr->rb_rxd = p;
  586         hr->rb_rxddma = dma;
  587         p += HME_NRXDESC * HME_XD_SIZE;
  588         dma += HME_NRXDESC * HME_XD_SIZE;
  589         /* Again move forward to the next 2048 byte boundary. */
  590         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  591         p = (caddr_t)roundup((u_long)p, 2048);
  592 
  593         /*
  594          * Initialize transmit buffer descriptors
  595          */
  596         for (i = 0; i < HME_NTXDESC; i++) {
  597                 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
  598                 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
  599         }
  600 
  601         STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
  602         STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
  603         for (i = 0; i < HME_NTXQ; i++) {
  604                 td = &sc->sc_rb.rb_txdesc[i];
  605                 if (td->htx_m != NULL) {
  606                         m_freem(td->htx_m);
  607                         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  608                             BUS_DMASYNC_POSTWRITE);
  609                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  610                         td->htx_m = NULL;
  611                 }
  612                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
  613         }
  614 
  615         /*
  616          * Initialize receive buffer descriptors
  617          */
  618         for (i = 0; i < HME_NRXDESC; i++) {
  619                 error = hme_add_rxbuf(sc, i, 1);
  620                 if (error != 0)
  621                         return (error);
  622         }
  623 
  624         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
  625         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);
  626 
  627         hr->rb_tdhead = hr->rb_tdtail = 0;
  628         hr->rb_td_nbusy = 0;
  629         hr->rb_rdtail = 0;
  630         CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
  631             hr->rb_txddma);
  632         CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
  633             hr->rb_rxddma);
  634         CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
  635             *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
  636         CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
  637             *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
  638         return (0);
  639 }
  640 
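      /*
       * Write a MAC register with the bits in `clr' cleared and those in `set'
       * set, then poll the register until the hardware reflects the change,
       * giving up after a few milliseconds.  Returns 1 on success, 0 on timeout.
       */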
  641 static int
  642 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
  643     u_int32_t clr, u_int32_t set)
  644 {
  645         int i = 0;
  646 
  647         val &= ~clr;
  648         val |= set;
  649         HME_MAC_WRITE_4(sc, reg, val);
  650         if (clr == 0 && set == 0)
  651                 return (1);     /* just write, no bits to wait for */
  652         do {
  653                 DELAY(100);
  654                 i++;
  655                 val = HME_MAC_READ_4(sc, reg);
  656                 if (i > 40) {
  657                         /* After 3.5ms, we should have been done. */
  658                         device_printf(sc->sc_dev, "timeout while writing to "
  659                             "MAC configuration register\n");
  660                         return (0);
  661                 }
  662         } while ((val & clr) != 0 && (val & set) != set);
  663         return (1);
  664 }
  665 
  666 /*
  667  * Initialization of interface; set up initialization block
  668  * and transmit/receive descriptor rings.
  669  */
  670 static void
  671 hme_init(void *xsc)
  672 {
  673         struct hme_softc *sc = (struct hme_softc *)xsc;
  674         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
  675         u_int8_t *ea;
  676         u_int32_t n, v;
  677 
  678         /*
  679          * Initialization sequence. The numbered steps below correspond
  680          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  681          * Channel Engine manual (part of the PCIO manual).
  682          * See also the STP2002-STQ document from Sun Microsystems.
  683          */
  684 
  685         /* step 1 & 2. Reset the Ethernet Channel */
  686         hme_stop(sc);
  687 
  688         /* Re-initialize the MIF */
  689         hme_mifinit(sc);
  690 
  691 #if 0
  692         /* Mask all MIF interrupts, just in case */
  693         HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
  694 #endif
  695 
  696         /* step 3. Setup data structures in host memory */
  697         if (hme_meminit(sc) != 0) {
  698                 device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
  699                 return;
  700         }
  701 
  702         /* step 4. TX MAC registers & counters */
  703         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  704         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  705         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  706         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  707         HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
  708 
  709         /* Load station MAC address */
  710         ea = sc->sc_arpcom.ac_enaddr;
  711         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
  712         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
  713         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
  714 
  715         /*
  716          * Init seed for backoff
  717          * (source suggested by manual: low 10 bits of MAC address)
  718          */
  719         v = ((ea[4] << 8) | ea[5]) & 0x3fff;
  720         HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
  721 
  722 
  723         /* Note: Accepting power-on default for other MAC registers here.. */
  724 
  725         /* step 5. RX MAC registers & counters */
  726         hme_setladrf(sc, 0);
  727 
  728         /* step 6 & 7. Program Descriptor Ring Base Addresses */
  729         HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
  730         /* Transmit Descriptor ring size: in increments of 16 */
  731         HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
  732 
  733         HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
  734         HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
  735 
  736         /* step 8. Global Configuration & Interrupt Mask */
  737         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
  738             ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
  739                 HME_SEB_STAT_HOSTTOTX |
  740                 HME_SEB_STAT_RXTOHOST |
  741                 HME_SEB_STAT_TXALL |
  742                 HME_SEB_STAT_TXPERR |
  743                 HME_SEB_STAT_RCNTEXP |
  744                 HME_SEB_STAT_ALL_ERRORS ));
  745 
  746         switch (sc->sc_burst) {
  747         default:
  748                 v = 0;
  749                 break;
  750         case 16:
  751                 v = HME_SEB_CFG_BURST16;
  752                 break;
  753         case 32:
  754                 v = HME_SEB_CFG_BURST32;
  755                 break;
  756         case 64:
  757                 v = HME_SEB_CFG_BURST64;
  758                 break;
  759         }
  760         /*
  761          * Blindly setting 64bit transfers may hang PCI cards (Cheerio?).
  762          * Allowing 64bit transfers breaks TX checksum offload as well.
  763          * It is not known whether this comes from a hardware bug or from the
  764          * driver's DMAing scheme.
  765          *
  766          * if (sc->sc_pci == 0)
  767          *      v |= HME_SEB_CFG_64BIT;
  768          */
  769         HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
  770 
  771         /* step 9. ETX Configuration: use mostly default values */
  772 
  773         /* Enable DMA */
  774         v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
  775         v |= HME_ETX_CFG_DMAENABLE;
  776         HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
  777 
  778         /* step 10. ERX Configuration */
  779         v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
  780 
  781         /* Encode Receive Descriptor ring size: four possible values */
  782         v &= ~HME_ERX_CFG_RINGSIZEMSK;
  783         switch (HME_NRXDESC) {
  784         case 32:
  785                 v |= HME_ERX_CFG_RINGSIZE32;
  786                 break;
  787         case 64:
  788                 v |= HME_ERX_CFG_RINGSIZE64;
  789                 break;
  790         case 128:
  791                 v |= HME_ERX_CFG_RINGSIZE128;
  792                 break;
  793         case 256:
  794                 v |= HME_ERX_CFG_RINGSIZE256;
  795                 break;
  796         default:
  797                 printf("hme: invalid Receive Descriptor ring size\n");
  798                 break;
  799         }
  800 
  801         /* Enable DMA, fix RX first byte offset. */
  802         v &= ~HME_ERX_CFG_FBO_MASK;
  803         v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
  804         /* RX TCP/UDP checksum offset */
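              /*
               * The checksum start offset is given in units of 16-bit words
               * (hence the division by 2) and assumes a minimal IP header;
               * hme_rxcksum() later corrects the sum for IP options in software.
               */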
  805         n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
  806         n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
  807         v |= n;
  808         CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
  809         HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
  810 
  811         /* step 11. XIF Configuration */
  812         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
  813         v |= HME_MAC_XIF_OE;
  814         /* If an external transceiver is connected, enable its MII drivers */
  815         if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
  816                 v |= HME_MAC_XIF_MIIENABLE;
  817         CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
  818         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
  819 
  820         /* step 12. RX_MAC Configuration Register */
  821         v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
  822         v |= HME_MAC_RXCFG_ENABLE;
  823         v &= ~(HME_MAC_RXCFG_DCRCS);
  824         CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
  825         HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
  826 
  827         /* step 13. TX_MAC Configuration Register */
  828         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
  829         v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
  830         CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
  831         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
  832 
  833         /* step 14. Issue Transmit Pending command */
  834 
  835 #ifdef HMEDEBUG
  836         /* Debug: double-check. */
  837         CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
  838             "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
  839             HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
  840             HME_ERX_READ_4(sc, HME_ERXI_RING),
  841             HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
  842         CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
  843             HME_SEB_READ_4(sc, HME_SEBI_IMASK),
  844             HME_ERX_READ_4(sc, HME_ERXI_CFG),
  845             HME_ETX_READ_4(sc, HME_ETXI_CFG));
  846         CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
  847             HME_MAC_READ_4(sc, HME_MACI_RXCFG),
  848             HME_MAC_READ_4(sc, HME_MACI_TXCFG));
  849 #endif
  850 
  851         /* Set the current media. */
  852         /* mii_mediachg(sc->sc_mii); */
  853 
  854         /* Start the one second timer. */
  855         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  856 
  857         ifp->if_flags |= IFF_RUNNING;
  858         ifp->if_flags &= ~IFF_OACTIVE;
  859         ifp->if_timer = 0;
  860         hme_start(ifp);
  861 }
  862 
  863 struct hme_txdma_arg {
  864         struct hme_softc        *hta_sc;
  865         struct hme_txdesc       *hta_htx;
  866         int                     hta_ndescs;
  867 };
  868 
  869 /*
  870  * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf()
  871  * are readable from the nearest burst boundary on (i.e. potentially before
  872  * ds_addr) to the first boundary beyond the end. This is usually a safe
  873  * assumption to make, but is not documented.
  874  */
  875 static void
  876 hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
  877     bus_size_t totsz, int error)
  878 {
  879         struct hme_txdma_arg *ta = xsc;
  880         struct hme_txdesc *htx;
  881         bus_size_t len = 0;
  882         caddr_t txd;
  883         u_int32_t flags = 0;
  884         int i, tdhead, pci;
  885 
  886         if (error != 0)
  887                 return;
  888 
  889         tdhead = ta->hta_sc->sc_rb.rb_tdhead;
  890         pci = ta->hta_sc->sc_pci;
  891         txd = ta->hta_sc->sc_rb.rb_txd;
  892         htx = ta->hta_htx;
  893 
  894         if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
  895                 ta->hta_ndescs = -1;
  896                 return;
  897         }
  898         ta->hta_ndescs = nsegs;
  899 
  900         for (i = 0; i < nsegs; i++) {
  901                 if (segs[i].ds_len == 0)
  902                         continue;
  903 
  904                 /* Fill the ring entry. */
  905                 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
  906                 if (len == 0)
  907                         flags |= HME_XD_SOP;
  908                 if (len + segs[i].ds_len == totsz)
  909                         flags |= HME_XD_EOP;
  910                 CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
  911                     "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
  912                     (u_int)segs[i].ds_addr);
  913                 HME_XD_SETFLAGS(pci, txd, tdhead, flags);
  914                 HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);
  915 
  916                 ta->hta_sc->sc_rb.rb_td_nbusy++;
  917                 htx->htx_lastdesc = tdhead;
  918                 tdhead = (tdhead + 1) % HME_NTXDESC;
  919                 len += segs[i].ds_len;
  920         }
  921         ta->hta_sc->sc_rb.rb_tdhead = tdhead;
  922         KASSERT((flags & HME_XD_EOP) != 0,
  923             ("hme_txdma_callback: missed end of packet!"));
  924 }
  925 
  926 /* TX TCP/UDP checksum */
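      /*
       * The start offset points at the beginning of the TCP/UDP header within
       * the frame, the stuff offset at the checksum field inside that header
       * (start + csum_data); both are encoded into the TX descriptor flags
       * together with HME_XD_TXCKSUM.
       */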
  927 static void
  928 hme_txcksum(struct mbuf *m, u_int32_t *cflags)
  929 {
  930         struct ip *ip;
  931         u_int32_t offset, offset2;
  932         caddr_t p;
  933 
  934         for(; m && m->m_len == 0; m = m->m_next)
  935                 ;
  936         if (m == NULL || m->m_len < ETHER_HDR_LEN) {
  937                 printf("hme_txcksum: m_len < ETHER_HDR_LEN\n");
  938                 return; /* checksum will be corrupted */
  939         }
  940         if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
  941                 if (m->m_len != ETHER_HDR_LEN) {
  942                         printf("hme_txcksum: m_len != ETHER_HDR_LEN\n");
  943                         return; /* checksum will be corrupted */
  944                 }
  945                 /* XXX */
  946                 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
  947                         ;
  948                 if (m == NULL)
  949                         return; /* checksum will be corrupted */
  950                 ip = mtod(m, struct ip *);
  951         } else {
  952                 p = mtod(m, caddr_t);
  953                 p += ETHER_HDR_LEN;
  954                 ip = (struct ip *)p;
  955         }
  956         offset2 = m->m_pkthdr.csum_data;
  957         offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
  958         *cflags = offset << HME_XD_TXCKSUM_SSHIFT;
  959         *cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT); 
  960         *cflags |= HME_XD_TXCKSUM;
  961 }
  962 
  963 /*
  964  * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and
  965  * start the transmission.
  966  * Returns 0 on success, -1 if there were not enough free descriptors to map
  967  * the packet, or an errno otherwise.
  968  */
  969 static int
  970 hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
  971 {
  972         struct hme_txdma_arg cba;
  973         struct hme_txdesc *td;
  974         int error, si, ri;
  975         u_int32_t flags, cflags = 0;
  976 
  977         si = sc->sc_rb.rb_tdhead;
  978         if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
  979                 return (-1);
  980         if ((m0->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
  981                 hme_txcksum(m0, &cflags);
  982         td->htx_m = m0;
  983         cba.hta_sc = sc;
  984         cba.hta_htx = td;
  985         if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
  986              m0, hme_txdma_callback, &cba, 0)) != 0)
  987                 goto fail;
  988         if (cba.hta_ndescs == -1) {
  989                 error = -1;
  990                 goto fail;
  991         }
  992         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  993             BUS_DMASYNC_PREWRITE);
  994 
  995         STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
  996         STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);
  997 
  998         /* Turn over descriptor ownership to the hardware, back to front. */
  999         ri = sc->sc_rb.rb_tdhead;
 1000         CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
 1001             ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
 1002         do {
 1003                 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
 1004                 flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
 1005                     HME_XD_OWN | cflags;
 1006                 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
 1007                     ri, si, flags);
 1008                 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
 1009         } while (ri != si);
 1010 
 1011         /* start the transmission. */
 1012         HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
 1013         return (0);
 1014 fail:
 1015         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
 1016         return (error);
 1017 }
 1018 
 1019 /*
 1020  * Pass a packet to the higher levels.
 1021  */
 1022 static void
 1023 hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
 1024 {
 1025         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1026         struct mbuf *m;
 1027 
 1028         if (len <= sizeof(struct ether_header) ||
 1029             len > HME_MAX_FRAMESIZE) {
 1030 #ifdef HMEDEBUG
 1031                 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
 1032                     len);
 1033 #endif
 1034                 ifp->if_ierrors++;
 1035                 hme_discard_rxbuf(sc, ix);
 1036                 return;
 1037         }
 1038 
 1039         m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
 1040         CTR1(KTR_HME, "hme_read: len %d", len);
 1041 
 1042         if (hme_add_rxbuf(sc, ix, 0) != 0) {
 1043                 /*
 1044                  * hme_add_rxbuf will leave the old buffer in the ring until
 1045                  * it is sure that a new buffer can be mapped. If it can not,
 1046                  * drop the packet, but leave the interface up.
 1047                  */
 1048                 ifp->if_iqdrops++;
 1049                 hme_discard_rxbuf(sc, ix);
 1050                 return;
 1051         }
 1052 
 1053         ifp->if_ipackets++;
 1054 
 1055         m->m_pkthdr.rcvif = ifp;
 1056         m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
 1057         m_adj(m, HME_RXOFFS);
 1058         /* RX TCP/UDP checksum */
 1059         if (ifp->if_capenable & IFCAP_RXCSUM)
 1060                 hme_rxcksum(m, flags);
 1061         /* Pass the packet up. */
 1062         (*ifp->if_input)(ifp, m);
 1063 }
 1064 
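      /*
       * ifnet if_start handler: drain the send queue, mapping each packet onto
       * the TX ring via hme_load_txmbuf().  When the ring fills up, the packet
       * is put back at the head of the queue and IFF_OACTIVE is set.
       */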
 1065 static void
 1066 hme_start(struct ifnet *ifp)
 1067 {
 1068         struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
 1069         struct mbuf *m;
 1070         int error, enq = 0;
 1071 
 1072         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
 1073                 return;
 1074 
 1075         error = 0;
 1076         for (;;) {
 1077                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1078                 if (m == NULL)
 1079                         break;
 1080 
 1081                 error = hme_load_txmbuf(sc, m);
 1082                 if (error == -1) {
 1083                         ifp->if_flags |= IFF_OACTIVE;
 1084                         IFQ_DRV_PREPEND(&ifp->if_snd, m);
 1085                         break;
 1086                 } else if (error > 0) {
 1087                         printf("hme_start: error %d while loading mbuf\n",
 1088                             error);
 1089                 } else {
 1090                         enq = 1;
 1091                         BPF_MTAP(ifp, m);
 1092                 }
 1093         }
 1094 
 1095         if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
 1096                 ifp->if_flags |= IFF_OACTIVE;
 1097         /* Set watchdog timer if a packet was queued */
 1098         if (enq) {
 1099                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1100                     BUS_DMASYNC_PREWRITE);
 1101                 ifp->if_timer = 5;
 1102         }
 1103 }
 1104 
 1105 /*
 1106  * Transmit interrupt.
 1107  */
 1108 static void
 1109 hme_tint(struct hme_softc *sc)
 1110 {
 1111         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1112         struct hme_txdesc *htx;
 1113         unsigned int ri, txflags;
 1114 
 1115         /*
 1116          * Unload collision counters
 1117          */
 1118         ifp->if_collisions +=
 1119                 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
 1120                 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
 1121                 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
 1122                 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
 1123 
 1124         /*
 1125          * then clear the hardware counters.
 1126          */
 1127         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
 1128         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
 1129         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
 1130         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
 1131 
 1132         htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1133         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1134         /* Fetch current position in the transmit ring */
 1135         for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
 1136                 if (sc->sc_rb.rb_td_nbusy <= 0) {
 1137                         CTR0(KTR_HME, "hme_tint: not busy!");
 1138                         break;
 1139                 }
 1140 
 1141                 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
 1142                 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
 1143 
 1144                 if ((txflags & HME_XD_OWN) != 0)
 1145                         break;
 1146 
 1147                 CTR0(KTR_HME, "hme_tint: not owned");
 1148                 --sc->sc_rb.rb_td_nbusy;
 1149                 ifp->if_flags &= ~IFF_OACTIVE;
 1150 
 1151                 /* Complete packet transmitted? */
 1152                 if ((txflags & HME_XD_EOP) == 0)
 1153                         continue;
 1154 
 1155                 KASSERT(htx->htx_lastdesc == ri,
 1156                     ("hme_tint: ring indices skewed: %d != %d!",
 1157                      htx->htx_lastdesc, ri));
 1158                 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
 1159                     BUS_DMASYNC_POSTWRITE);
 1160                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1161 
 1162                 ifp->if_opackets++;
 1163                 m_freem(htx->htx_m);
 1164                 htx->htx_m = NULL;
 1165                 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
 1166                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
 1167                 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1168         }
 1169         /* Turn off watchdog */
 1170         if (sc->sc_rb.rb_td_nbusy == 0)
 1171                 ifp->if_timer = 0;
 1172 
 1173         /* Update ring */
 1174         sc->sc_rb.rb_tdtail = ri;
 1175 
 1176         hme_start(ifp);
 1177 
 1178         if (sc->sc_rb.rb_td_nbusy == 0)
 1179                 ifp->if_timer = 0;
 1180 }
 1181 
 1182 /*
 1183  * RX TCP/UDP checksum 
 1184  */
 1185 static void
 1186 hme_rxcksum(struct mbuf *m, u_int32_t flags)
 1187 {
 1188         struct ether_header *eh;
 1189         struct ip *ip;
 1190         struct udphdr *uh;
 1191         int32_t hlen, len, pktlen;
 1192         u_int16_t cksum, *opts;
 1193         u_int32_t temp32;
 1194 
 1195         pktlen = m->m_pkthdr.len;
 1196         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 1197                 return;
 1198         eh = mtod(m, struct ether_header *);
 1199         if (eh->ether_type != htons(ETHERTYPE_IP))
 1200                 return;
 1201         ip = (struct ip *)(eh + 1);
 1202         if (ip->ip_v != IPVERSION)
 1203                 return;
 1204 
 1205         hlen = ip->ip_hl << 2;
 1206         pktlen -= sizeof(struct ether_header);
 1207         if (hlen < sizeof(struct ip))
 1208                 return;
 1209         if (ntohs(ip->ip_len) < hlen)
 1210                 return;
 1211         if (ntohs(ip->ip_len) != pktlen)
 1212                 return;
 1213         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 1214                 return; /* can't handle fragmented packet */
 1215 
 1216         switch (ip->ip_p) {
 1217         case IPPROTO_TCP:
 1218                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1219                         return;
 1220                 break;
 1221         case IPPROTO_UDP:
 1222                 if (pktlen < (hlen + sizeof(struct udphdr)))
 1223                         return;
 1224                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 1225                 if (uh->uh_sum == 0)
 1226                         return; /* no checksum */
 1227                 break;
 1228         default:
 1229                 return;
 1230         }
 1231 
 1232         cksum = ~(flags & HME_XD_RXCKSUM);
 1233         /* checksum fixup for IP options */
 1234         len = hlen - sizeof(struct ip);
 1235         if (len > 0) {
 1236                 opts = (u_int16_t *)(ip + 1);
 1237                 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
 1238                         temp32 = cksum - *opts;
 1239                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 1240                         cksum = temp32 & 65535;
 1241                 }
 1242         }
 1243         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 1244         m->m_pkthdr.csum_data = cksum;
 1245 }
 1246 
 1247 /*
 1248  * Receive interrupt.
 1249  */
 1250 static void
 1251 hme_rint(struct hme_softc *sc)
 1252 {
 1253         caddr_t xdr = sc->sc_rb.rb_rxd;
 1254         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1255         unsigned int ri, len;
 1256         int progress = 0;
 1257         u_int32_t flags;
 1258 
 1259         /*
 1260          * Process all buffers with valid data.
 1261          */
 1262         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1263         for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
 1264                 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
 1265                 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
 1266                 if ((flags & HME_XD_OWN) != 0)
 1267                         break;
 1268 
 1269                 progress++;
 1270                 if ((flags & HME_XD_OFL) != 0) {
 1271                         device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
 1272                             "flags=0x%x\n", ri, flags);
 1273                         ifp->if_ierrors++;
 1274                         hme_discard_rxbuf(sc, ri);
 1275                 } else {
 1276                         len = HME_XD_DECODE_RSIZE(flags);
 1277                         hme_read(sc, ri, len, flags);
 1278                 }
 1279         }
 1280         if (progress) {
 1281                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1282                     BUS_DMASYNC_PREWRITE);
 1283         }
 1284         sc->sc_rb.rb_rdtail = ri;
 1285 }
 1286 
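      /*
       * Handle error conditions signaled in the SEB status register; MIF (link
       * status) interrupts are merely reported, everything else is logged
       * through HME_WHINE().
       */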
 1287 static void
 1288 hme_eint(struct hme_softc *sc, u_int status)
 1289 {
 1290 
 1291         if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
 1292                 device_printf(sc->sc_dev, "XXXlink status changed\n");
 1293                 return;
 1294         }
 1295 
 1296         HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 1297 }
 1298 
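      /*
       * Interrupt handler: read the SEB status register once and dispatch to
       * the error, transmit and receive handlers as appropriate.
       */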
 1299 void
 1300 hme_intr(void *v)
 1301 {
 1302         struct hme_softc *sc = (struct hme_softc *)v;
 1303         u_int32_t status;
 1304 
 1305         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1306         CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 1307 
 1308         if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 1309                 hme_eint(sc, status);
 1310 
 1311         if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 1312                 hme_tint(sc);
 1313 
 1314         if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 1315                 hme_rint(sc);
 1316 }
 1317 
 1318 
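      /*
       * Watchdog timeout: pending transmissions did not complete before the
       * if_timer armed in hme_start() expired; log the event and reset the chip.
       */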
 1319 static void
 1320 hme_watchdog(struct ifnet *ifp)
 1321 {
 1322         struct hme_softc *sc = ifp->if_softc;
 1323 #ifdef HMEDEBUG
 1324         u_int32_t status;
 1325 
 1326         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1327         CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
 1328 #endif
 1329         device_printf(sc->sc_dev, "device timeout\n");
 1330         ++ifp->if_oerrors;
 1331 
 1332         hme_reset(sc);
 1333 }
 1334 
 1335 /*
 1336  * Initialize the MII Management Interface
 1337  */
 1338 static void
 1339 hme_mifinit(struct hme_softc *sc)
 1340 {
 1341         u_int32_t v;
 1342 
 1343         /* Configure the MIF in frame mode */
 1344         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1345         v &= ~HME_MIF_CFG_BBMODE;
 1346         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1347 }
 1348 
 1349 /*
 1350  * MII interface
 1351  */
 1352 int
 1353 hme_mii_readreg(device_t dev, int phy, int reg)
 1354 {
 1355         struct hme_softc *sc = device_get_softc(dev);
 1356         int n;
 1357         u_int32_t v;
 1358 
 1359         /* Select the desired PHY in the MIF configuration register */
 1360         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1361         /* Clear PHY select bit */
 1362         v &= ~HME_MIF_CFG_PHY;
 1363         if (phy == HME_PHYAD_EXTERNAL)
 1364                 /* Set PHY select bit to get at external device */
 1365                 v |= HME_MIF_CFG_PHY;
 1366         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1367 
 1368         /* Construct the frame command */
 1369         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 1370             HME_MIF_FO_TAMSB |
 1371             (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 1372             (phy << HME_MIF_FO_PHYAD_SHIFT) |
 1373             (reg << HME_MIF_FO_REGAD_SHIFT);
 1374 
 1375         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1376         for (n = 0; n < 100; n++) {
 1377                 DELAY(1);
 1378                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1379                 if (v & HME_MIF_FO_TALSB)
 1380                         return (v & HME_MIF_FO_DATA);
 1381         }
 1382 
 1383         device_printf(sc->sc_dev, "mii_read timeout\n");
 1384         return (0);
 1385 }
 1386 
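       /*
        * Write a PHY register; the same frame mode handshake as the read
        * path, with the value to write carried in the frame's data field.
        */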
 1387 int
 1388 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 1389 {
 1390         struct hme_softc *sc = device_get_softc(dev);
 1391         int n;
 1392         u_int32_t v;
 1393 
 1394         /* Select the desired PHY in the MIF configuration register */
 1395         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1396         /* Clear PHY select bit */
 1397         v &= ~HME_MIF_CFG_PHY;
 1398         if (phy == HME_PHYAD_EXTERNAL)
 1399                 /* Set PHY select bit to get at external device */
 1400                 v |= HME_MIF_CFG_PHY;
 1401         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1402 
 1403         /* Construct the frame command */
 1404         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)  |
 1405             HME_MIF_FO_TAMSB                            |
 1406             (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 1407             (phy << HME_MIF_FO_PHYAD_SHIFT)             |
 1408             (reg << HME_MIF_FO_REGAD_SHIFT)             |
 1409             (val & HME_MIF_FO_DATA);
 1410 
 1411         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1412         for (n = 0; n < 100; n++) {
 1413                 DELAY(1);
 1414                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1415                 if (v & HME_MIF_FO_TALSB)
 1416                         return (1);
 1417         }
 1418 
 1419         device_printf(sc->sc_dev, "mii_write timeout\n");
 1420         return (0);
 1421 }
 1422 
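       /*
        * Media status change callback: re-select the active PHY and update
        * the MAC duplex setting.  The transmitter is disabled via
        * hme_mac_bitflip() while HME_MACI_TXCFG is rewritten, then enabled
        * again afterwards.
        */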
 1423 void
 1424 hme_mii_statchg(device_t dev)
 1425 {
 1426         struct hme_softc *sc = device_get_softc(dev);
 1427         int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
 1428         int phy = sc->sc_phys[instance];
 1429         u_int32_t v;
 1430 
 1431 #ifdef HMEDEBUG
 1432         if (sc->sc_debug)
 1433                 printf("hme_mii_statchg: status change: phy = %d\n", phy);
 1434 #endif
 1435 
 1436         /* Select the current PHY in the MIF configuration register */
 1437         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1438         v &= ~HME_MIF_CFG_PHY;
 1439         if (phy == HME_PHYAD_EXTERNAL)
 1440                 v |= HME_MIF_CFG_PHY;
 1441         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1442 
 1443         /* Set the MAC Full Duplex bit appropriately */
 1444         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 1445         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
 1446                 return;
 1447         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 1448                 v |= HME_MAC_TXCFG_FULLDPLX;
 1449         else
 1450                 v &= ~HME_MAC_TXCFG_FULLDPLX;
 1451         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
 1452         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
 1453                 return;
 1454 }
 1455 
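       /* Media change request; simply hand it to the MII layer. */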
 1456 static int
 1457 hme_mediachange(struct ifnet *ifp)
 1458 {
 1459         struct hme_softc *sc = ifp->if_softc;
 1460 
 1461         return (mii_mediachg(sc->sc_mii));
 1462 }
 1463 
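       /*
        * Report the current media status; the PHY is only polled while the
        * interface is up.
        */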
 1464 static void
 1465 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1466 {
 1467         struct hme_softc *sc = ifp->if_softc;
 1468 
 1469         if ((ifp->if_flags & IFF_UP) == 0)
 1470                 return;
 1471 
 1472         mii_pollstat(sc->sc_mii);
 1473         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 1474         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 1475 }
 1476 
 1477 /*
 1478  * Process an ioctl request.
 1479  */
 1480 static int
 1481 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1482 {
 1483         struct hme_softc *sc = ifp->if_softc;
 1484         struct ifreq *ifr = (struct ifreq *)data;
 1485         int s, error = 0;
 1486 
 1487         s = splnet();
 1488 
 1489         switch (cmd) {
 1490         case SIOCSIFFLAGS:
 1491                 if ((ifp->if_flags & IFF_UP) == 0 &&
 1492                     (ifp->if_flags & IFF_RUNNING) != 0) {
 1493                         /*
 1494                          * If interface is marked down and it is running, then
 1495                          * stop it.
 1496                          */
 1497                         hme_stop(sc);
 1498                         ifp->if_flags &= ~IFF_RUNNING;
 1499                 } else if ((ifp->if_flags & IFF_UP) != 0 &&
 1500                            (ifp->if_flags & IFF_RUNNING) == 0) {
 1501                         /*
 1502                          * If interface is marked up and it is stopped, then
 1503                          * start it.
 1504                          */
 1505                         hme_init(sc);
 1506                 } else if ((ifp->if_flags & IFF_UP) != 0) {
 1507                         /*
 1508                          * Reset the interface to pick up changes in any other
 1509                          * flags that affect hardware registers.
 1510                          */
 1511                         hme_init(sc);
 1512                 }
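                       /*
                        * IFF_LINK0 toggles UDP transmit checksum offloading
                        * (CSUM_UDP), which takes effect when IFCAP_TXCSUM is
                        * enabled.
                        */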
 1513                 if ((ifp->if_flags & IFF_LINK0) != 0)
 1514                         sc->sc_csum_features |= CSUM_UDP;
 1515                 else
 1516                         sc->sc_csum_features &= ~CSUM_UDP;
 1517                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1518                         ifp->if_hwassist = sc->sc_csum_features;
 1519 #ifdef HMEDEBUG
 1520                 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
 1521 #endif
 1522                 break;
 1523 
 1524         case SIOCADDMULTI:
 1525         case SIOCDELMULTI:
 1526                 hme_setladrf(sc, 1);
 1527                 error = 0;
 1528                 break;
 1529         case SIOCGIFMEDIA:
 1530         case SIOCSIFMEDIA:
 1531                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 1532                 break;
 1533         case SIOCSIFCAP:
 1534                 ifp->if_capenable = ifr->ifr_reqcap;
 1535                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1536                         ifp->if_hwassist = sc->sc_csum_features;
 1537                 else
 1538                         ifp->if_hwassist = 0;
 1539                 break;
 1540         default:
 1541                 error = ether_ioctl(ifp, cmd, data);
 1542                 break;
 1543         }
 1544 
 1545         splx(s);
 1546         return (error);
 1547 }
 1548 
 1549 /*
 1550  * Set up the logical address filter.
 1551  */
 1552 static void
 1553 hme_setladrf(struct hme_softc *sc, int reenable)
 1554 {
 1555         struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 1556         struct ifmultiaddr *inm;
 1557         u_int32_t crc;
 1558         u_int32_t hash[4];
 1559         u_int32_t macc;
 1560 
 1561         /* Clear hash table */
 1562         hash[3] = hash[2] = hash[1] = hash[0] = 0;
 1563 
 1564         /* Get current RX configuration */
 1565         macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1566 
 1567         /*
  1568          * Disable the receiver while changing its state, as the
  1569          * documentation mandates.
  1570          * We must then wait until the bit clears in the register.
  1571          * This should take at most 3.5ms.
 1572          */
 1573         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
 1574                 return;
 1575         /* Disable the hash filter before writing to the filter registers. */
 1576         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1577             HME_MAC_RXCFG_HENABLE, 0))
 1578                 return;
 1579 
 1580         if (reenable)
 1581                 macc |= HME_MAC_RXCFG_ENABLE;
 1582         else
 1583                 macc &= ~HME_MAC_RXCFG_ENABLE;
 1584 
 1585         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 1586                 /* Turn on promiscuous mode; turn off the hash filter */
 1587                 macc |= HME_MAC_RXCFG_PMISC;
 1588                 macc &= ~HME_MAC_RXCFG_HENABLE;
 1589                 ifp->if_flags |= IFF_ALLMULTI;
 1590                 goto chipit;
 1591         }
 1592 
 1593         /* Turn off promiscuous mode; turn on the hash filter */
 1594         macc &= ~HME_MAC_RXCFG_PMISC;
 1595         macc |= HME_MAC_RXCFG_HENABLE;
 1596 
 1597         /*
 1598          * Set up multicast address filter by passing all multicast addresses
 1599          * through a crc generator, and then using the high order 6 bits as an
  1600          * index into the 64 bit logical address filter.  The two high
  1601          * order bits select the word, while the remaining four bits
  1602          * select the bit within the word.
 1603          */
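               /*
                * For example, if the six retained CRC bits are 100101b, the
                * top two bits (10b) select hash[2] and the low four bits
                * (0101b) set bit 5 within that word.
                */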
 1604 
 1605         TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
 1606                 if (inm->ifma_addr->sa_family != AF_LINK)
 1607                         continue;
 1608                 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1609                     inm->ifma_addr), ETHER_ADDR_LEN);
 1610 
 1611                 /* Just want the 6 most significant bits. */
 1612                 crc >>= 26;
 1613 
 1614                 /* Set the corresponding bit in the filter. */
 1615                 hash[crc >> 4] |= 1 << (crc & 0xf);
 1616         }
 1617 
 1618         ifp->if_flags &= ~IFF_ALLMULTI;
 1619 
 1620 chipit:
 1621         /* Now load the hash table into the chip */
 1622         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 1623         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 1624         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 1625         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 1626         hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 1627             macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
 1628 }
