FreeBSD/Linux Kernel Cross Reference
sys/dev/hme/if_hme.c


    1 /*-
    2  * Copyright (c) 1999 The NetBSD Foundation, Inc.
    3  * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software contributed to The NetBSD Foundation
    7  * by Paul Kranenburg.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *        This product includes software developed by the NetBSD
   20  *        Foundation, Inc. and its contributors.
   21  * 4. Neither the name of The NetBSD Foundation nor the names of its
   22  *    contributors may be used to endorse or promote products derived
   23  *    from this software without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  *
   37  *      from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD$");
   42 
   43 /*
   44  * HME Ethernet module driver.
   45  *
    46  * The HME is, e.g., part of the PCIO PCI multi-function device.
   47  * It supports TX gathering and TX and RX checksum offloading.
    48  * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
    49  * for this offset: mbuf clusters are usually aligned to 2^11 boundaries, and
    50  * skipping 2 bytes makes sure that the header following the Ethernet header
    51  * lands on a natural boundary, which minimizes wastage in the most common case.
   52  *
   53  * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
   54  * maximum packet size (this is not verified). Buffers starting on odd
   55  * boundaries must be mapped so that the burst can start on a natural boundary.
   56  *
    57  * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum offloading.
    58  * In reality, the same technique can be applied to UDP datagrams as well. However,
    59  * the hardware does not compensate the checksum for UDP datagrams, which can
    60  * yield 0x0. As a safeguard, UDP checksum offload is disabled by default. It
    61  * can be reactivated by setting the special link option link0 with ifconfig(8).
   62  */
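       /*
        * For example, UDP checksum offload can typically be re-enabled on the
        * first HME interface with (interface name assumed):
        *      # ifconfig hme0 link0
        */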
   63 #define HME_CSUM_FEATURES       (CSUM_TCP)
   64 #if 0
   65 #define HMEDEBUG
   66 #endif
   67 #define KTR_HME         KTR_CT2         /* XXX */
   68 
   69 #include <sys/param.h>
   70 #include <sys/systm.h>
   71 #include <sys/bus.h>
   72 #include <sys/endian.h>
   73 #include <sys/kernel.h>
   74 #include <sys/module.h>
   75 #include <sys/ktr.h>
   76 #include <sys/mbuf.h>
   77 #include <sys/malloc.h>
   78 #include <sys/socket.h>
   79 #include <sys/sockio.h>
   80 
   81 #include <net/bpf.h>
   82 #include <net/ethernet.h>
   83 #include <net/if.h>
   84 #include <net/if_arp.h>
   85 #include <net/if_dl.h>
   86 #include <net/if_media.h>
   87 #include <net/if_types.h>
   88 #include <net/if_vlan_var.h>
   89 
   90 #include <netinet/in.h>
   91 #include <netinet/in_systm.h>
   92 #include <netinet/ip.h>
   93 #include <netinet/tcp.h>
   94 #include <netinet/udp.h>
   95 
   96 #include <dev/mii/mii.h>
   97 #include <dev/mii/miivar.h>
   98 
   99 #include <machine/bus.h>
  100 
  101 #include <dev/hme/if_hmereg.h>
  102 #include <dev/hme/if_hmevar.h>
  103 
  104 CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
  105 CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);
  106 
  107 static void     hme_start(struct ifnet *);
  108 static void     hme_start_locked(struct ifnet *);
  109 static void     hme_stop(struct hme_softc *);
  110 static int      hme_ioctl(struct ifnet *, u_long, caddr_t);
  111 static void     hme_tick(void *);
  112 static int      hme_watchdog(struct hme_softc *);
  113 static void     hme_init(void *);
  114 static void     hme_init_locked(struct hme_softc *);
  115 static int      hme_add_rxbuf(struct hme_softc *, unsigned int, int);
  116 static int      hme_meminit(struct hme_softc *);
  117 static int      hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
  118     u_int32_t, u_int32_t);
  119 static void     hme_mifinit(struct hme_softc *);
  120 static void     hme_setladrf(struct hme_softc *, int);
  121 
  122 static int      hme_mediachange(struct ifnet *);
  123 static int      hme_mediachange_locked(struct hme_softc *);
  124 static void     hme_mediastatus(struct ifnet *, struct ifmediareq *);
  125 
  126 static int      hme_load_txmbuf(struct hme_softc *, struct mbuf **);
  127 static void     hme_read(struct hme_softc *, int, int, u_int32_t);
  128 static void     hme_eint(struct hme_softc *, u_int);
  129 static void     hme_rint(struct hme_softc *);
  130 static void     hme_tint(struct hme_softc *);
  131 static void     hme_rxcksum(struct mbuf *, u_int32_t);
  132 
  133 static void     hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
  134 
  135 devclass_t hme_devclass;
  136 
  137 static int hme_nerr;
  138 
  139 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
  140 MODULE_DEPEND(hme, miibus, 1, 1, 1);
  141 
  142 #define HME_SPC_READ_4(spc, sc, offs) \
  143         bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  144             (offs))
  145 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
  146         bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  147             (offs), (v))
  148 #define HME_SPC_BARRIER(spc, sc, offs, l, f) \
  149         bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  150             (offs), (l), (f))
  151 
  152 #define HME_SEB_READ_4(sc, offs)        HME_SPC_READ_4(seb, (sc), (offs))
  153 #define HME_SEB_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(seb, (sc), (offs), (v))
  154 #define HME_SEB_BARRIER(sc, offs, l, f) \
  155         HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
  156 #define HME_ERX_READ_4(sc, offs)        HME_SPC_READ_4(erx, (sc), (offs))
  157 #define HME_ERX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(erx, (sc), (offs), (v))
  158 #define HME_ERX_BARRIER(sc, offs, l, f) \
  159         HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
  160 #define HME_ETX_READ_4(sc, offs)        HME_SPC_READ_4(etx, (sc), (offs))
  161 #define HME_ETX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(etx, (sc), (offs), (v))
  162 #define HME_ETX_BARRIER(sc, offs, l, f) \
  163         HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
  164 #define HME_MAC_READ_4(sc, offs)        HME_SPC_READ_4(mac, (sc), (offs))
  165 #define HME_MAC_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mac, (sc), (offs), (v))
  166 #define HME_MAC_BARRIER(sc, offs, l, f) \
  167         HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
  168 #define HME_MIF_READ_4(sc, offs)        HME_SPC_READ_4(mif, (sc), (offs))
  169 #define HME_MIF_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mif, (sc), (offs), (v))
  170 #define HME_MIF_BARRIER(sc, offs, l, f) \
  171         HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))
  172 
  173 #define HME_MAXERR      5
  174 #define HME_WHINE(dev, ...) do {                                        \
  175         if (hme_nerr++ < HME_MAXERR)                                    \
  176                 device_printf(dev, __VA_ARGS__);                        \
  177         if (hme_nerr == HME_MAXERR) {                                   \
  178                 device_printf(dev, "too many errors; not reporting "    \
  179                     "any more\n");                                      \
  180         }                                                               \
  181 } while(0)
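       /*
        * Example use (see hme_eint() further below):
        *      HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
        */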
  182 
  183 /* Support oversized VLAN frames. */
  184 #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
  185 
  186 int
  187 hme_config(struct hme_softc *sc)
  188 {
  189         struct ifnet *ifp;
  190         struct mii_softc *child;
  191         bus_size_t size;
  192         int error, rdesc, tdesc, i;
  193 
  194         ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
  195         if (ifp == NULL)
  196                 return (ENOSPC);
  197 
  198         /*
  199          * HME common initialization.
  200          *
  201          * hme_softc fields that must be initialized by the front-end:
  202          *
  203          * the DMA bus tag:
  204          *      sc_dmatag
  205          *
   206          * the bus handles, tags and offsets (split for SBus compatibility):
  207          *      sc_seb{t,h,o}   (Shared Ethernet Block registers)
  208          *      sc_erx{t,h,o}   (Receiver Unit registers)
  209          *      sc_etx{t,h,o}   (Transmitter Unit registers)
  210          *      sc_mac{t,h,o}   (MAC registers)
  211          *      sc_mif{t,h,o}   (Management Interface registers)
  212          *
  213          * the maximum bus burst size:
  214          *      sc_burst
  215          *
  216          */
  217 
  218         callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);
  219 
  220         /* Make sure the chip is stopped. */
  221         HME_LOCK(sc);
  222         hme_stop(sc);
  223         HME_UNLOCK(sc);
  224 
  225         error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
  226             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  227             BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
  228             NULL, NULL, &sc->sc_pdmatag);
  229         if (error)
  230                 goto fail_ifnet;
  231 
  232         /*
  233          * Create control, RX and TX mbuf DMA tags.
  234          * Buffer descriptors must be aligned on a 2048 byte boundary;
  235          * take this into account when calculating the size. Note that
  236          * the maximum number of descriptors (256) occupies 2048 bytes,
  237          * so we allocate that much regardless of HME_N*DESC.
  238          */
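               /*
                * Hence the 4096 bytes below: 2048 for the TX descriptor ring
                * plus 2048 for the RX descriptor ring (at most 256 descriptors
                * of 8 bytes each), independent of the configured ring sizes.
                */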
  239         size = 4096;
  240         error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
  241             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
  242             1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
  243         if (error)
  244                 goto fail_ptag;
  245 
  246         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  247             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  248             1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
  249         if (error)
  250                 goto fail_ctag;
  251 
  252         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  253             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  254             MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
  255             NULL, NULL, &sc->sc_tdmatag);
  256         if (error)
  257                 goto fail_rtag;
  258 
  259         /* Allocate the control DMA buffer. */
  260         error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
  261             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
  262         if (error != 0) {
  263                 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
  264                 goto fail_ttag;
  265         }
  266 
  267         /* Load the control DMA buffer. */
  268         sc->sc_rb.rb_dmabase = 0;
  269         if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
  270             sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
  271             sc->sc_rb.rb_dmabase == 0) {
  272                 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
  273                     error);
  274                 goto fail_free;
  275         }
  276         CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
  277             sc->sc_rb.rb_dmabase);
  278 
  279         /*
  280          * Prepare the RX descriptors. rdesc serves as marker for the last
  281          * processed descriptor and may be used later on.
  282          */
  283         for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
  284                 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
  285                 error = bus_dmamap_create(sc->sc_rdmatag, 0,
  286                     &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
  287                 if (error != 0)
  288                         goto fail_rxdesc;
  289         }
  290         error = bus_dmamap_create(sc->sc_rdmatag, 0,
  291             &sc->sc_rb.rb_spare_dmamap);
  292         if (error != 0)
  293                 goto fail_rxdesc;
  294         /* Same for the TX descs. */
  295         for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
  296                 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
  297                 error = bus_dmamap_create(sc->sc_tdmatag, 0,
  298                     &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
  299                 if (error != 0)
  300                         goto fail_txdesc;
  301         }
  302 
  303         sc->sc_csum_features = HME_CSUM_FEATURES;
  304         /* Initialize ifnet structure. */
  305         ifp->if_softc = sc;
  306         if_initname(ifp, device_get_name(sc->sc_dev),
  307             device_get_unit(sc->sc_dev));
  308         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  309         ifp->if_start = hme_start;
  310         ifp->if_ioctl = hme_ioctl;
  311         ifp->if_init = hme_init;
  312         IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
  313         ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
  314         IFQ_SET_READY(&ifp->if_snd);
  315 
  316         hme_mifinit(sc);
  317 
  318         if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
  319             hme_mediastatus)) != 0) {
  320                 device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
  321                 goto fail_rxdesc;
  322         }
  323         sc->sc_mii = device_get_softc(sc->sc_miibus);
  324 
  325         /*
  326          * Walk along the list of attached MII devices and
  327          * establish an `MII instance' to `PHY number'
  328          * mapping. We'll use this mapping to enable the MII
  329          * drivers of the external transceiver according to
  330          * the currently selected media.
  331          */
  332         sc->sc_phys[0] = sc->sc_phys[1] = -1;
  333         LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
  334                 /*
  335                  * Note: we support just two PHYs: the built-in
  336                  * internal device and an external on the MII
  337                  * connector.
  338                  */
  339                 if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
  340                     child->mii_phy != HME_PHYAD_INTERNAL) ||
  341                     child->mii_inst > 1) {
  342                         device_printf(sc->sc_dev, "cannot accommodate "
  343                             "MII device %s at phy %d, instance %d\n",
  344                             device_get_name(child->mii_dev),
  345                             child->mii_phy, child->mii_inst);
  346                         continue;
  347                 }
  348 
  349                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  350         }
  351 
  352         /* Attach the interface. */
  353         ether_ifattach(ifp, sc->sc_enaddr);
  354 
  355         /*
  356          * Tell the upper layer(s) we support long frames/checksum offloads.
  357          */
  358         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  359         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  360         ifp->if_hwassist |= sc->sc_csum_features;
  361         ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  362         return (0);
  363 
  364 fail_txdesc:
  365         for (i = 0; i < tdesc; i++) {
  366                 bus_dmamap_destroy(sc->sc_tdmatag,
  367                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  368         }
  369         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  370 fail_rxdesc:
  371         for (i = 0; i < rdesc; i++) {
  372                 bus_dmamap_destroy(sc->sc_rdmatag,
  373                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  374         }
  375         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  376 fail_free:
  377         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  378 fail_ttag:
  379         bus_dma_tag_destroy(sc->sc_tdmatag);
  380 fail_rtag:
  381         bus_dma_tag_destroy(sc->sc_rdmatag);
  382 fail_ctag:
  383         bus_dma_tag_destroy(sc->sc_cdmatag);
  384 fail_ptag:
  385         bus_dma_tag_destroy(sc->sc_pdmatag);
  386 fail_ifnet:
  387         if_free(ifp);
  388         return (error);
  389 }
  390 
  391 void
  392 hme_detach(struct hme_softc *sc)
  393 {
  394         struct ifnet *ifp = sc->sc_ifp;
  395         int i;
  396 
  397         HME_LOCK(sc);
  398         hme_stop(sc);
  399         HME_UNLOCK(sc);
  400         callout_drain(&sc->sc_tick_ch);
  401         ether_ifdetach(ifp);
  402         if_free(ifp);
  403         device_delete_child(sc->sc_dev, sc->sc_miibus);
  404 
  405         for (i = 0; i < HME_NTXQ; i++) {
  406                 bus_dmamap_destroy(sc->sc_tdmatag,
  407                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  408         }
  409         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  410         for (i = 0; i < HME_NRXDESC; i++) {
  411                 bus_dmamap_destroy(sc->sc_rdmatag,
  412                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  413         }
  414         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  415             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  416         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  417         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  418         bus_dma_tag_destroy(sc->sc_tdmatag);
  419         bus_dma_tag_destroy(sc->sc_rdmatag);
  420         bus_dma_tag_destroy(sc->sc_cdmatag);
  421         bus_dma_tag_destroy(sc->sc_pdmatag);
  422 }
  423 
  424 void
  425 hme_suspend(struct hme_softc *sc)
  426 {
  427 
  428         HME_LOCK(sc);
  429         hme_stop(sc);
  430         HME_UNLOCK(sc);
  431 }
  432 
  433 void
  434 hme_resume(struct hme_softc *sc)
  435 {
  436         struct ifnet *ifp = sc->sc_ifp;
  437 
  438         HME_LOCK(sc);
  439         if ((ifp->if_flags & IFF_UP) != 0)
  440                 hme_init_locked(sc);
  441         HME_UNLOCK(sc);
  442 }
  443 
  444 static void
  445 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  446 {
  447         struct hme_softc *sc = (struct hme_softc *)xsc;
  448 
  449         if (error != 0)
  450                 return;
  451         KASSERT(nsegs == 1,
  452             ("%s: too many DMA segments (%d)", __func__, nsegs));
  453         sc->sc_rb.rb_dmabase = segs[0].ds_addr;
  454 }
  455 
  456 static void
  457 hme_tick(void *arg)
  458 {
  459         struct hme_softc *sc = arg;
  460         struct ifnet *ifp;
  461 
  462         HME_LOCK_ASSERT(sc, MA_OWNED);
  463 
  464         ifp = sc->sc_ifp;
  465         /*
  466          * Unload collision counters
  467          */
  468         ifp->if_collisions +=
  469                 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
  470                 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
  471                 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
  472                 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
  473 
  474         /*
  475          * then clear the hardware counters.
  476          */
  477         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  478         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  479         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  480         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  481 
  482         mii_tick(sc->sc_mii);
  483 
  484         if (hme_watchdog(sc) == EJUSTRETURN)
  485                 return;
  486 
  487         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  488 }
  489 
  490 static void
  491 hme_stop(struct hme_softc *sc)
  492 {
  493         u_int32_t v;
  494         int n;
  495 
  496         callout_stop(&sc->sc_tick_ch);
  497         sc->sc_wdog_timer = 0;
  498         sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  499         sc->sc_flags &= ~HME_LINK;
  500 
  501         /* Mask all interrupts */
  502         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);
  503 
  504         /* Reset transmitter and receiver */
  505         HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
  506             HME_SEB_RESET_ERX);
  507         HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
  508             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  509         for (n = 0; n < 20; n++) {
  510                 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
  511                 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
  512                         return;
  513                 DELAY(20);
  514         }
  515 
  516         device_printf(sc->sc_dev, "hme_stop: reset failed\n");
  517 }
  518 
  519 /*
  520  * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
  521  * ring for subsequent use.
  522  */
  523 static __inline void
  524 hme_discard_rxbuf(struct hme_softc *sc, int ix)
  525 {
  526 
  527         /*
  528          * Dropped a packet, reinitialize the descriptor and turn the
  529          * ownership back to the hardware.
  530          */
  531         HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
  532             ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
  533             &sc->sc_rb.rb_rxdesc[ix])));
  534 }
  535 
  536 static int
  537 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
  538 {
  539         struct hme_rxdesc *rd;
  540         struct mbuf *m;
  541         bus_dma_segment_t segs[1];
  542         bus_dmamap_t map;
  543         uintptr_t b;
  544         int a, unmap, nsegs;
  545 
  546         rd = &sc->sc_rb.rb_rxdesc[ri];
  547         unmap = rd->hrx_m != NULL;
  548         if (unmap && keepold) {
  549                 /*
  550                  * Reinitialize the descriptor flags, as they may have been
  551                  * altered by the hardware.
  552                  */
  553                 hme_discard_rxbuf(sc, ri);
  554                 return (0);
  555         }
  556         if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
  557                 return (ENOBUFS);
  558         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
  559         b = mtod(m, uintptr_t);
  560         /*
  561          * Required alignment boundary. At least 16 is needed, but since
  562          * the mapping must be done in a way that a burst can start on a
  563          * natural boundary we might need to extend this.
  564          */
  565         a = imax(HME_MINRXALIGN, sc->sc_burst);
  566         /*
   567          * Make sure the buffer is suitably aligned. The 2 byte offset is
   568          * removed when the mbuf is handed up. XXX: this ensures at least
   569          * 16 byte alignment of the header adjacent to the Ethernet header,
   570          * which should be sufficient in all cases. Nevertheless, this
   571          * second-guesses ALIGN().
  572          */
  573         m_adj(m, roundup2(b, a) - b);
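               /*
                * For instance, with sc_burst == 64 and a cluster whose data
                * pointer ends in 0x38, roundup2() trims 8 bytes so that the
                * buffer starts on the next 64-byte boundary (illustrative
                * numbers only).
                */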
  574         if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
  575             m, segs, &nsegs, 0) != 0) {
  576                 m_freem(m);
  577                 return (ENOBUFS);
  578         }
  579         /* If nsegs is wrong then the stack is corrupt. */
  580         KASSERT(nsegs == 1,
  581             ("%s: too many DMA segments (%d)", __func__, nsegs));
  582         if (unmap) {
  583                 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
  584                     BUS_DMASYNC_POSTREAD);
  585                 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
  586         }
  587         map = rd->hrx_dmamap;
  588         rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
  589         sc->sc_rb.rb_spare_dmamap = map;
  590         bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
  591         HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
  592             segs[0].ds_addr);
  593         rd->hrx_m = m;
  594         HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
  595             HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
  596         return (0);
  597 }
  598 
  599 static int
  600 hme_meminit(struct hme_softc *sc)
  601 {
  602         struct hme_ring *hr = &sc->sc_rb;
  603         struct hme_txdesc *td;
  604         bus_addr_t dma;
  605         caddr_t p;
  606         unsigned int i;
  607         int error;
  608 
  609         p = hr->rb_membase;
  610         dma = hr->rb_dmabase;
  611 
  612         /*
  613          * Allocate transmit descriptors
  614          */
  615         hr->rb_txd = p;
  616         hr->rb_txddma = dma;
  617         p += HME_NTXDESC * HME_XD_SIZE;
  618         dma += HME_NTXDESC * HME_XD_SIZE;
  619         /*
  620          * We have reserved descriptor space until the next 2048 byte
  621          * boundary.
  622          */
  623         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  624         p = (caddr_t)roundup((u_long)p, 2048);
  625 
  626         /*
  627          * Allocate receive descriptors
  628          */
  629         hr->rb_rxd = p;
  630         hr->rb_rxddma = dma;
  631         p += HME_NRXDESC * HME_XD_SIZE;
  632         dma += HME_NRXDESC * HME_XD_SIZE;
  633         /* Again move forward to the next 2048 byte boundary.*/
  634         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  635         p = (caddr_t)roundup((u_long)p, 2048);
  636 
  637         /*
  638          * Initialize transmit buffer descriptors
  639          */
  640         for (i = 0; i < HME_NTXDESC; i++) {
  641                 HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
  642                 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
  643         }
  644 
  645         STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
  646         STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
  647         for (i = 0; i < HME_NTXQ; i++) {
  648                 td = &sc->sc_rb.rb_txdesc[i];
  649                 if (td->htx_m != NULL) {
  650                         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  651                             BUS_DMASYNC_POSTWRITE);
  652                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  653                         m_freem(td->htx_m);
  654                         td->htx_m = NULL;
  655                 }
  656                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
  657         }
  658 
  659         /*
  660          * Initialize receive buffer descriptors
  661          */
  662         for (i = 0; i < HME_NRXDESC; i++) {
  663                 error = hme_add_rxbuf(sc, i, 1);
  664                 if (error != 0)
  665                         return (error);
  666         }
  667 
  668         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  669             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  670 
  671         hr->rb_tdhead = hr->rb_tdtail = 0;
  672         hr->rb_td_nbusy = 0;
  673         hr->rb_rdtail = 0;
  674         CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
  675             hr->rb_txddma);
  676         CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
  677             hr->rb_rxddma);
  678         CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
  679             *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
  680         CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
  681             *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
  682         return (0);
  683 }
  684 
  685 static int
  686 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
  687     u_int32_t clr, u_int32_t set)
  688 {
  689         int i = 0;
  690 
  691         val &= ~clr;
  692         val |= set;
  693         HME_MAC_WRITE_4(sc, reg, val);
  694         HME_MAC_BARRIER(sc, reg, 4,
  695             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  696         if (clr == 0 && set == 0)
  697                 return (1);     /* just write, no bits to wait for */
  698         do {
  699                 DELAY(100);
  700                 i++;
  701                 val = HME_MAC_READ_4(sc, reg);
  702                 if (i > 40) {
  703                         /* After 3.5ms, we should have been done. */
  704                         device_printf(sc->sc_dev, "timeout while writing to "
  705                             "MAC configuration register\n");
  706                         return (0);
  707                 }
  708         } while ((val & clr) != 0 && (val & set) != set);
  709         return (1);
  710 }
  711 
  712 /*
  713  * Initialization of interface; set up initialization block
  714  * and transmit/receive descriptor rings.
  715  */
  716 static void
  717 hme_init(void *xsc)
  718 {
  719         struct hme_softc *sc = (struct hme_softc *)xsc;
  720 
  721         HME_LOCK(sc);
  722         hme_init_locked(sc);
  723         HME_UNLOCK(sc);
  724 }
  725 
  726 static void
  727 hme_init_locked(struct hme_softc *sc)
  728 {
  729         struct ifnet *ifp = sc->sc_ifp;
  730         u_int8_t *ea;
  731         u_int32_t n, v;
  732 
  733         HME_LOCK_ASSERT(sc, MA_OWNED);
  734         /*
  735          * Initialization sequence. The numbered steps below correspond
  736          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  737          * Channel Engine manual (part of the PCIO manual).
  738          * See also the STP2002-STQ document from Sun Microsystems.
  739          */
  740 
  741         /* step 1 & 2. Reset the Ethernet Channel */
  742         hme_stop(sc);
  743 
  744         /* Re-initialize the MIF */
  745         hme_mifinit(sc);
  746 
  747 #if 0
  748         /* Mask all MIF interrupts, just in case */
  749         HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
  750 #endif
  751 
  752         /* step 3. Setup data structures in host memory */
  753         if (hme_meminit(sc) != 0) {
   754                 device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
  755                 return;
  756         }
  757 
  758         /* step 4. TX MAC registers & counters */
  759         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  760         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  761         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  762         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  763         HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
  764 
  765         /* Load station MAC address */
  766         ea = IF_LLADDR(ifp);
  767         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
  768         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
  769         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
  770 
  771         /*
  772          * Init seed for backoff
  773          * (source suggested by manual: low 10 bits of MAC address)
  774          */
  775         v = ((ea[4] << 8) | ea[5]) & 0x3fff;
  776         HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
  777 
   778         /* Note: accepting the power-on defaults for the other MAC registers here. */
  779 
  780         /* step 5. RX MAC registers & counters */
  781         hme_setladrf(sc, 0);
  782 
  783         /* step 6 & 7. Program Descriptor Ring Base Addresses */
  784         HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
  785         /* Transmit Descriptor ring size: in increments of 16 */
  786         HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
  787 
  788         HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
  789         HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
  790 
  791         /* step 8. Global Configuration & Interrupt Mask */
  792         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
  793             ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
  794                 HME_SEB_STAT_HOSTTOTX |
  795                 HME_SEB_STAT_RXTOHOST |
  796                 HME_SEB_STAT_TXALL |
  797                 HME_SEB_STAT_TXPERR |
  798                 HME_SEB_STAT_RCNTEXP |
  799                 HME_SEB_STAT_ALL_ERRORS ));
  800 
  801         switch (sc->sc_burst) {
  802         default:
  803                 v = 0;
  804                 break;
  805         case 16:
  806                 v = HME_SEB_CFG_BURST16;
  807                 break;
  808         case 32:
  809                 v = HME_SEB_CFG_BURST32;
  810                 break;
  811         case 64:
  812                 v = HME_SEB_CFG_BURST64;
  813                 break;
  814         }
  815         /*
   816          * Blindly enabling 64-bit transfers may hang PCI cards (Cheerio?).
   817          * Allowing 64-bit transfers also breaks TX checksum offload. It is not
   818          * known whether this stems from a hardware bug or from the driver's
   819          * DMA scheme.
  820          *
  821          * if (sc->sc_flags & HME_PCI == 0)
  822          *      v |= HME_SEB_CFG_64BIT;
  823          */
  824         HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
  825 
  826         /* step 9. ETX Configuration: use mostly default values */
  827 
  828         /* Enable DMA */
  829         v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
  830         v |= HME_ETX_CFG_DMAENABLE;
  831         HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
  832 
  833         /* step 10. ERX Configuration */
  834         v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
  835 
  836         /* Encode Receive Descriptor ring size: four possible values */
  837         v &= ~HME_ERX_CFG_RINGSIZEMSK;
  838         switch (HME_NRXDESC) {
  839         case 32:
  840                 v |= HME_ERX_CFG_RINGSIZE32;
  841                 break;
  842         case 64:
  843                 v |= HME_ERX_CFG_RINGSIZE64;
  844                 break;
  845         case 128:
  846                 v |= HME_ERX_CFG_RINGSIZE128;
  847                 break;
  848         case 256:
  849                 v |= HME_ERX_CFG_RINGSIZE256;
  850                 break;
  851         default:
  852                 printf("hme: invalid Receive Descriptor ring size\n");
  853                 break;
  854         }
  855 
  856         /* Enable DMA, fix RX first byte offset. */
  857         v &= ~HME_ERX_CFG_FBO_MASK;
  858         v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
   859         /* RX TCP/UDP checksum start offset, expressed in 16-bit words */
  860         n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
  861         n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
  862         v |= n;
  863         CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
  864         HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
  865 
  866         /* step 11. XIF Configuration */
  867         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
  868         v |= HME_MAC_XIF_OE;
  869         CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
  870         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
  871 
  872         /* step 12. RX_MAC Configuration Register */
  873         v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
  874         v |= HME_MAC_RXCFG_ENABLE;
  875         v &= ~(HME_MAC_RXCFG_DCRCS);
  876         CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
  877         HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
  878 
  879         /* step 13. TX_MAC Configuration Register */
  880         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
  881         v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
  882         CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
  883         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
  884 
  885         /* step 14. Issue Transmit Pending command */
  886 
  887 #ifdef HMEDEBUG
  888         /* Debug: double-check. */
  889         CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
  890             "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
  891             HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
  892             HME_ERX_READ_4(sc, HME_ERXI_RING),
  893             HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
  894         CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
  895             HME_SEB_READ_4(sc, HME_SEBI_IMASK),
  896             HME_ERX_READ_4(sc, HME_ERXI_CFG),
  897             HME_ETX_READ_4(sc, HME_ETXI_CFG));
  898         CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
  899             HME_MAC_READ_4(sc, HME_MACI_RXCFG),
  900             HME_MAC_READ_4(sc, HME_MACI_TXCFG));
  901 #endif
  902 
  903         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  904         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  905 
  906         /* Set the current media. */
  907         hme_mediachange_locked(sc);
  908 
  909         /* Start the one second timer. */
  910         sc->sc_wdog_timer = 0;
  911         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  912 }
  913 
  914 /*
  915  * Routine to DMA map an mbuf chain, set up the descriptor rings
  916  * accordingly and start the transmission.
   917  * Returns 0 on success, or an errno otherwise (e.g. ENOBUFS when there
   918  * are not enough free descriptors to map the packet).
  919  *
  920  * XXX: this relies on the fact that segments returned by
  921  * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
  922  * boundary on (i.e. potentially before ds_addr) to the first
  923  * boundary beyond the end.  This is usually a safe assumption to
  924  * make, but is not documented.
  925  */
  926 static int
  927 hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
  928 {
  929         bus_dma_segment_t segs[HME_NTXSEGS];
  930         struct hme_txdesc *htx;
  931         struct ip *ip;
  932         struct mbuf *m;
  933         caddr_t txd;
  934         int error, i, nsegs, pci, ri, si;
  935         uint32_t cflags, flags;
  936 
  937         if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
  938                 return (ENOBUFS);
  939 
  940         cflags = 0;
  941         if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
  942                 if (M_WRITABLE(*m0) == 0) {
  943                         m = m_dup(*m0, M_DONTWAIT);
  944                         m_freem(*m0);
  945                         *m0 = m;
  946                         if (m == NULL)
  947                                 return (ENOBUFS);
  948                 }
  949                 i = sizeof(struct ether_header);
  950                 m = m_pullup(*m0, i + sizeof(struct ip));
  951                 if (m == NULL) {
  952                         *m0 = NULL;
  953                         return (ENOBUFS);
  954                 }
  955                 ip = (struct ip *)(mtod(m, caddr_t) + i);
  956                 i += (ip->ip_hl << 2);
  957                 cflags = i << HME_XD_TXCKSUM_SSHIFT |
  958                     ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
  959                     HME_XD_TXCKSUM;
  960                 *m0 = m;
  961         }
  962 
  963         error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
  964             *m0, segs, &nsegs, 0);
  965         if (error == EFBIG) {
  966                 m = m_collapse(*m0, M_DONTWAIT, HME_NTXSEGS);
  967                 if (m == NULL) {
  968                         m_freem(*m0);
  969                         *m0 = NULL;
  970                         return (ENOMEM);
  971                 }
  972                 *m0 = m;
  973                 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
  974                     *m0, segs, &nsegs, 0);
  975                 if (error != 0) {
  976                         m_freem(*m0);
  977                         *m0 = NULL;
  978                         return (error);
  979                 }
  980         } else if (error != 0)
  981                 return (error);
  982         /* If nsegs is wrong then the stack is corrupt. */
  983         KASSERT(nsegs <= HME_NTXSEGS,
  984             ("%s: too many DMA segments (%d)", __func__, nsegs));
  985         if (nsegs == 0) {
  986                 m_freem(*m0);
  987                 *m0 = NULL;
  988                 return (EIO);
  989         }
  990         if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
  991                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
  992                 /* Retry with m_collapse(9)? */
  993                 return (ENOBUFS);
  994         }
  995         bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
  996 
  997         si = ri = sc->sc_rb.rb_tdhead;
  998         txd = sc->sc_rb.rb_txd;
  999         pci = sc->sc_flags & HME_PCI;
 1000         CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
 1001             HME_XD_GETFLAGS(pci, txd, ri));
 1002         for (i = 0; i < nsegs; i++) {
 1003                 /* Fill the ring entry. */
 1004                 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
 1005                 if (i == 0)
 1006                         flags |= HME_XD_SOP | cflags;
 1007                 else
 1008                         flags |= HME_XD_OWN | cflags;
 1009                 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
 1010                     ri, si, flags);
 1011                 HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
 1012                 HME_XD_SETFLAGS(pci, txd, ri, flags);
 1013                 sc->sc_rb.rb_td_nbusy++;
 1014                 htx->htx_lastdesc = ri;
 1015                 ri = (ri + 1) % HME_NTXDESC;
 1016         }
 1017         sc->sc_rb.rb_tdhead = ri;
 1018 
 1019         /* set EOP on the last descriptor */
 1020         ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
 1021         flags = HME_XD_GETFLAGS(pci, txd, ri);
 1022         flags |= HME_XD_EOP;
 1023         CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
 1024             flags);
 1025         HME_XD_SETFLAGS(pci, txd, ri, flags);
 1026 
 1027         /* Turn the first descriptor ownership to the hme */
 1028         flags = HME_XD_GETFLAGS(pci, txd, si);
 1029         flags |= HME_XD_OWN;
 1030         CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
 1031             ri, flags);
 1032         HME_XD_SETFLAGS(pci, txd, si, flags);
 1033 
 1034         STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
 1035         STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
 1036         htx->htx_m = *m0;
 1037 
 1038         /* start the transmission. */
 1039         HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
 1040 
 1041         return (0);
 1042 }
 1043 
 1044 /*
 1045  * Pass a packet to the higher levels.
 1046  */
 1047 static void
 1048 hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
 1049 {
 1050         struct ifnet *ifp = sc->sc_ifp;
 1051         struct mbuf *m;
 1052 
 1053         if (len <= sizeof(struct ether_header) ||
 1054             len > HME_MAX_FRAMESIZE) {
 1055 #ifdef HMEDEBUG
 1056                 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
 1057                     len);
 1058 #endif
 1059                 ifp->if_ierrors++;
 1060                 hme_discard_rxbuf(sc, ix);
 1061                 return;
 1062         }
 1063 
 1064         m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
 1065         CTR1(KTR_HME, "hme_read: len %d", len);
 1066 
 1067         if (hme_add_rxbuf(sc, ix, 0) != 0) {
 1068                 /*
 1069                  * hme_add_rxbuf will leave the old buffer in the ring until
 1070                  * it is sure that a new buffer can be mapped. If it can not,
 1071                  * drop the packet, but leave the interface up.
 1072                  */
 1073                 ifp->if_iqdrops++;
 1074                 hme_discard_rxbuf(sc, ix);
 1075                 return;
 1076         }
 1077 
 1078         ifp->if_ipackets++;
 1079 
 1080         m->m_pkthdr.rcvif = ifp;
 1081         m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
 1082         m_adj(m, HME_RXOFFS);
 1083         /* RX TCP/UDP checksum */
 1084         if (ifp->if_capenable & IFCAP_RXCSUM)
 1085                 hme_rxcksum(m, flags);
 1086         /* Pass the packet up. */
 1087         HME_UNLOCK(sc);
 1088         (*ifp->if_input)(ifp, m);
 1089         HME_LOCK(sc);
 1090 }
 1091 
 1092 static void
 1093 hme_start(struct ifnet *ifp)
 1094 {
 1095         struct hme_softc *sc = ifp->if_softc;
 1096 
 1097         HME_LOCK(sc);
 1098         hme_start_locked(ifp);
 1099         HME_UNLOCK(sc);
 1100 }
 1101 
 1102 static void
 1103 hme_start_locked(struct ifnet *ifp)
 1104 {
 1105         struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
 1106         struct mbuf *m;
 1107         int error, enq = 0;
 1108 
 1109         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1110             IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
 1111                 return;
 1112 
 1113         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 1114             sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
 1115                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1116                 if (m == NULL)
 1117                         break;
 1118 
 1119                 error = hme_load_txmbuf(sc, &m);
 1120                 if (error != 0) {
 1121                         if (m == NULL)
 1122                                 break;
 1123                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1124                         IFQ_DRV_PREPEND(&ifp->if_snd, m);
 1125                         break;
 1126                 }
 1127                 enq++;
 1128                 BPF_MTAP(ifp, m);
 1129         }
 1130 
 1131         if (enq > 0) {
 1132                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1133                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1134                 sc->sc_wdog_timer = 5;
 1135         }
 1136 }
 1137 
 1138 /*
 1139  * Transmit interrupt.
 1140  */
 1141 static void
 1142 hme_tint(struct hme_softc *sc)
 1143 {
 1144         caddr_t txd;
 1145         struct ifnet *ifp = sc->sc_ifp;
 1146         struct hme_txdesc *htx;
 1147         unsigned int ri, txflags;
 1148 
 1149         txd = sc->sc_rb.rb_txd;
 1150         htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1151         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1152         /* Fetch current position in the transmit ring */
 1153         for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
 1154                 if (sc->sc_rb.rb_td_nbusy <= 0) {
 1155                         CTR0(KTR_HME, "hme_tint: not busy!");
 1156                         break;
 1157                 }
 1158 
 1159                 txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
 1160                 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
 1161 
 1162                 if ((txflags & HME_XD_OWN) != 0)
 1163                         break;
 1164 
 1165                 CTR0(KTR_HME, "hme_tint: not owned");
 1166                 --sc->sc_rb.rb_td_nbusy;
 1167                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1168 
 1169                 /* Complete packet transmitted? */
 1170                 if ((txflags & HME_XD_EOP) == 0)
 1171                         continue;
 1172 
 1173                 KASSERT(htx->htx_lastdesc == ri,
 1174                     ("%s: ring indices skewed: %d != %d!",
 1175                     __func__, htx->htx_lastdesc, ri));
 1176                 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
 1177                     BUS_DMASYNC_POSTWRITE);
 1178                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1179 
 1180                 ifp->if_opackets++;
 1181                 m_freem(htx->htx_m);
 1182                 htx->htx_m = NULL;
 1183                 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
 1184                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
 1185                 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1186         }
 1187         sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
 1188 
 1189         /* Update ring */
 1190         sc->sc_rb.rb_tdtail = ri;
 1191 
 1192         hme_start_locked(ifp);
 1193 }
 1194 
 1195 /*
 1196  * RX TCP/UDP checksum
 1197  */
 1198 static void
 1199 hme_rxcksum(struct mbuf *m, u_int32_t flags)
 1200 {
 1201         struct ether_header *eh;
 1202         struct ip *ip;
 1203         struct udphdr *uh;
 1204         int32_t hlen, len, pktlen;
 1205         u_int16_t cksum, *opts;
 1206         u_int32_t temp32;
 1207 
 1208         pktlen = m->m_pkthdr.len;
 1209         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 1210                 return;
 1211         eh = mtod(m, struct ether_header *);
 1212         if (eh->ether_type != htons(ETHERTYPE_IP))
 1213                 return;
 1214         ip = (struct ip *)(eh + 1);
 1215         if (ip->ip_v != IPVERSION)
 1216                 return;
 1217 
 1218         hlen = ip->ip_hl << 2;
 1219         pktlen -= sizeof(struct ether_header);
 1220         if (hlen < sizeof(struct ip))
 1221                 return;
 1222         if (ntohs(ip->ip_len) < hlen)
 1223                 return;
 1224         if (ntohs(ip->ip_len) != pktlen)
 1225                 return;
 1226         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 1227                 return; /* can't handle fragmented packet */
 1228 
 1229         switch (ip->ip_p) {
 1230         case IPPROTO_TCP:
 1231                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1232                         return;
 1233                 break;
 1234         case IPPROTO_UDP:
 1235                 if (pktlen < (hlen + sizeof(struct udphdr)))
 1236                         return;
 1237                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 1238                 if (uh->uh_sum == 0)
 1239                         return; /* no checksum */
 1240                 break;
 1241         default:
 1242                 return;
 1243         }
 1244 
 1245         cksum = ~(flags & HME_XD_RXCKSUM);
 1246         /* checksum fixup for IP options */
 1247         len = hlen - sizeof(struct ip);
 1248         if (len > 0) {
 1249                 opts = (u_int16_t *)(ip + 1);
 1250                 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
 1251                         temp32 = cksum - *opts;
 1252                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 1253                         cksum = temp32 & 65535;
 1254                 }
 1255         }
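               /*
                * Note that CSUM_PSEUDO_HDR is not set here, so csum_data
                * carries the data checksum only and the protocol layer is
                * expected to fold in the pseudo-header checksum itself.
                */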
 1256         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 1257         m->m_pkthdr.csum_data = cksum;
 1258 }
 1259 
 1260 /*
 1261  * Receive interrupt.
 1262  */
 1263 static void
 1264 hme_rint(struct hme_softc *sc)
 1265 {
 1266         caddr_t xdr = sc->sc_rb.rb_rxd;
 1267         struct ifnet *ifp = sc->sc_ifp;
 1268         unsigned int ri, len;
 1269         int progress = 0;
 1270         u_int32_t flags;
 1271 
 1272         /*
 1273          * Process all buffers with valid data.
 1274          */
 1275         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1276         for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
 1277                 flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
 1278                 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
 1279                 if ((flags & HME_XD_OWN) != 0)
 1280                         break;
 1281 
 1282                 progress++;
 1283                 if ((flags & HME_XD_OFL) != 0) {
 1284                         device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
 1285                             "flags=0x%x\n", ri, flags);
 1286                         ifp->if_ierrors++;
 1287                         hme_discard_rxbuf(sc, ri);
 1288                 } else {
 1289                         len = HME_XD_DECODE_RSIZE(flags);
 1290                         hme_read(sc, ri, len, flags);
 1291                 }
 1292         }
 1293         if (progress) {
 1294                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1295                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1296         }
 1297         sc->sc_rb.rb_rdtail = ri;
 1298 }
 1299 
 1300 static void
 1301 hme_eint(struct hme_softc *sc, u_int status)
 1302 {
 1303 
 1304         if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
 1305                 device_printf(sc->sc_dev, "XXXlink status changed: "
 1306                     "cfg=%#x, stat=%#x, sm=%#x\n",
 1307                     HME_MIF_READ_4(sc, HME_MIFI_CFG),
 1308                     HME_MIF_READ_4(sc, HME_MIFI_STAT),
 1309                     HME_MIF_READ_4(sc, HME_MIFI_SM));
 1310                 return;
 1311         }
 1312 
  1313         /* Check for fatal errors that need a reset to unfreeze the DMA engine. */
 1314         if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
 1315                 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 1316                 hme_init_locked(sc);
 1317         }
 1318 }
 1319 
 1320 void
 1321 hme_intr(void *v)
 1322 {
 1323         struct hme_softc *sc = (struct hme_softc *)v;
 1324         u_int32_t status;
 1325 
 1326         HME_LOCK(sc);
 1327         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1328         CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 1329 
 1330         if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 1331                 hme_eint(sc, status);
 1332 
 1333         if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 1334                 hme_rint(sc);
 1335 
 1336         if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 1337                 hme_tint(sc);
 1338         HME_UNLOCK(sc);
 1339 }
 1340 
 1341 static int
 1342 hme_watchdog(struct hme_softc *sc)
 1343 {
 1344         struct ifnet *ifp = sc->sc_ifp;
 1345 
 1346         HME_LOCK_ASSERT(sc, MA_OWNED);
 1347 
 1348 #ifdef HMEDEBUG
 1349         CTR1(KTR_HME, "hme_watchdog: status %x",
 1350             (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
 1351 #endif
 1352 
 1353         if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
 1354                 return (0);
 1355 
 1356         if ((sc->sc_flags & HME_LINK) != 0)
 1357                 device_printf(sc->sc_dev, "device timeout\n");
 1358         else if (bootverbose)
 1359                 device_printf(sc->sc_dev, "device timeout (no link)\n");
 1360         ++ifp->if_oerrors;
 1361 
 1362         hme_init_locked(sc);
 1363         hme_start_locked(ifp);
 1364         return (EJUSTRETURN);
 1365 }
 1366 
 1367 /*
 1368  * Initialize the MII Management Interface
 1369  */
 1370 static void
 1371 hme_mifinit(struct hme_softc *sc)
 1372 {
 1373         u_int32_t v;
 1374 
 1375         /*
 1376          * Configure the MIF in frame mode, polling disabled, internal PHY
 1377          * selected.
 1378          */
 1379         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
 1380 
 1381         /*
 1382          * If the currently selected media uses the external transceiver,
 1383          * enable its MII drivers (which basically isolates the internal
 1384          * one and vice versa). If the current media has not been set
 1385          * yet, we default to the internal transceiver.
 1386          */
 1387         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
 1388         if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
 1389             sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
 1390             HME_PHYAD_EXTERNAL)
 1391                 v |= HME_MAC_XIF_MIIENABLE;
 1392         else
 1393                 v &= ~HME_MAC_XIF_MIIENABLE;
 1394         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
 1395 }
 1396 
 1397 /*
 1398  * MII interface
 1399  */
 1400 int
 1401 hme_mii_readreg(device_t dev, int phy, int reg)
 1402 {
 1403         struct hme_softc *sc;
 1404         int n;
 1405         u_int32_t v;
 1406 
 1407         /* We can have at most two PHYs. */
 1408         if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
 1409                 return (0);
 1410 
 1411         sc = device_get_softc(dev);
 1412         /* Select the desired PHY in the MIF configuration register */
 1413         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1414         if (phy == HME_PHYAD_EXTERNAL)
 1415                 v |= HME_MIF_CFG_PHY;
 1416         else
 1417                 v &= ~HME_MIF_CFG_PHY;
 1418         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1419 
 1420         /* Construct the frame command */
 1421         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 1422             HME_MIF_FO_TAMSB |
 1423             (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 1424             (phy << HME_MIF_FO_PHYAD_SHIFT) |
 1425             (reg << HME_MIF_FO_REGAD_SHIFT);
 1426 
 1427         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1428         HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
 1429             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
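              /*
               * Busy-wait for the frame completion flag (TALSB); give up
               * and report a timeout after roughly 100 microseconds.
               */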
 1430         for (n = 0; n < 100; n++) {
 1431                 DELAY(1);
 1432                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1433                 if (v & HME_MIF_FO_TALSB)
 1434                         return (v & HME_MIF_FO_DATA);
 1435         }
 1436 
 1437         device_printf(sc->sc_dev, "mii_read timeout\n");
 1438         return (0);
 1439 }
 1440 
 1441 int
 1442 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 1443 {
 1444         struct hme_softc *sc;
 1445         int n;
 1446         u_int32_t v;
 1447 
 1448         /* We can have at most two PHYs. */
 1449         if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
 1450                 return (0);
 1451 
 1452         sc = device_get_softc(dev);
 1453         /* Select the desired PHY in the MIF configuration register */
 1454         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1455         if (phy == HME_PHYAD_EXTERNAL)
 1456                 v |= HME_MIF_CFG_PHY;
 1457         else
 1458                 v &= ~HME_MIF_CFG_PHY;
 1459         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1460 
 1461         /* Construct the frame command */
 1462         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)  |
 1463             HME_MIF_FO_TAMSB                            |
 1464             (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 1465             (phy << HME_MIF_FO_PHYAD_SHIFT)             |
 1466             (reg << HME_MIF_FO_REGAD_SHIFT)             |
 1467             (val & HME_MIF_FO_DATA);
 1468 
 1469         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1470         HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
 1471             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 1472         for (n = 0; n < 100; n++) {
 1473                 DELAY(1);
 1474                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1475                 if (v & HME_MIF_FO_TALSB)
 1476                         return (1);
 1477         }
 1478 
 1479         device_printf(sc->sc_dev, "mii_write timeout\n");
 1480         return (0);
 1481 }
 1482 
 1483 void
 1484 hme_mii_statchg(device_t dev)
 1485 {
 1486         struct hme_softc *sc;
 1487         uint32_t rxcfg, txcfg;
 1488 
 1489         sc = device_get_softc(dev);
 1490 
 1491 #ifdef HMEDEBUG
 1492         if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
 1493                 device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
 1494 #endif
 1495 
 1496         if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
 1497             IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
 1498                 sc->sc_flags |= HME_LINK;
 1499         else
 1500                 sc->sc_flags &= ~HME_LINK;
 1501 
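              /*
               * Disable the TX and RX MACs while updating the duplex
               * setting; they are re-enabled below only if the interface
               * is running and a link is established.
               */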
 1502         txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 1503         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
 1504             HME_MAC_TXCFG_ENABLE, 0))
 1505                 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
 1506         rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1507         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
 1508             HME_MAC_RXCFG_ENABLE, 0))
 1509                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 1510 
 1511         /* Set the MAC Full Duplex bit appropriately. */
 1512         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 1513                 txcfg |= HME_MAC_TXCFG_FULLDPLX;
 1514         else
 1515                 txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
 1516         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);
 1517 
 1518         if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1519             (sc->sc_flags & HME_LINK) != 0) {
 1520                 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
 1521                     HME_MAC_TXCFG_ENABLE))
 1522                         device_printf(sc->sc_dev, "cannot enable TX MAC\n");
 1523                 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
 1524                     HME_MAC_RXCFG_ENABLE))
 1525                         device_printf(sc->sc_dev, "cannot enable RX MAC\n");
 1526         }
 1527 }
 1528 
 1529 static int
 1530 hme_mediachange(struct ifnet *ifp)
 1531 {
 1532         struct hme_softc *sc = ifp->if_softc;
 1533         int error;
 1534 
 1535         HME_LOCK(sc);
 1536         error = hme_mediachange_locked(sc);
 1537         HME_UNLOCK(sc);
 1538         return (error);
 1539 }
 1540 
 1541 static int
 1542 hme_mediachange_locked(struct hme_softc *sc)
 1543 {
 1544         struct mii_softc *child;
 1545 
 1546         HME_LOCK_ASSERT(sc, MA_OWNED);
 1547 
 1548 #ifdef HMEDEBUG
 1549         if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
 1550                 device_printf(sc->sc_dev, "hme_mediachange_locked\n");
 1551 #endif
 1552 
 1553         hme_mifinit(sc);
 1554 
 1555         /*
 1556          * If both PHYs are present, reset them. This is required for
 1557          * unisolating the previously isolated PHY when switching PHYs.
 1558          * As the above hme_mifinit() call will set the MII drivers in
 1559          * the XIF configuration register according to the currently
 1560          * selected media, there should be no window during which the
 1561          * data paths of both transceivers are open at the same time,
 1562          * even if the PHY device drivers use MIIF_NOISOLATE.
 1563          */
 1564         if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
 1565                 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
 1566                         mii_phy_reset(child);
 1567         return (mii_mediachg(sc->sc_mii));
 1568 }
 1569 
 1570 static void
 1571 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1572 {
 1573         struct hme_softc *sc = ifp->if_softc;
 1574 
 1575         HME_LOCK(sc);
 1576         if ((ifp->if_flags & IFF_UP) == 0) {
 1577                 HME_UNLOCK(sc);
 1578                 return;
 1579         }
 1580 
 1581         mii_pollstat(sc->sc_mii);
 1582         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 1583         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 1584         HME_UNLOCK(sc);
 1585 }
 1586 
 1587 /*
 1588  * Process an ioctl request.
 1589  */
 1590 static int
 1591 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1592 {
 1593         struct hme_softc *sc = ifp->if_softc;
 1594         struct ifreq *ifr = (struct ifreq *)data;
 1595         int error = 0;
 1596 
 1597         switch (cmd) {
 1598         case SIOCSIFFLAGS:
 1599                 HME_LOCK(sc);
 1600                 if ((ifp->if_flags & IFF_UP) != 0) {
 1601                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1602                             ((ifp->if_flags ^ sc->sc_ifflags) &
 1603                             (IFF_ALLMULTI | IFF_PROMISC)) != 0)
 1604                                 hme_setladrf(sc, 1);
 1605                         else
 1606                                 hme_init_locked(sc);
 1607                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1608                         hme_stop(sc);
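                      /*
                       * The link0 interface flag toggles UDP transmit
                       * checksum offloading.
                       */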
 1609                 if ((ifp->if_flags & IFF_LINK0) != 0)
 1610                         sc->sc_csum_features |= CSUM_UDP;
 1611                 else
 1612                         sc->sc_csum_features &= ~CSUM_UDP;
 1613                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1614                         ifp->if_hwassist = sc->sc_csum_features;
 1615                 sc->sc_ifflags = ifp->if_flags;
 1616                 HME_UNLOCK(sc);
 1617                 break;
 1618 
 1619         case SIOCADDMULTI:
 1620         case SIOCDELMULTI:
 1621                 HME_LOCK(sc);
 1622                 hme_setladrf(sc, 1);
 1623                 HME_UNLOCK(sc);
 1624                 error = 0;
 1625                 break;
 1626         case SIOCGIFMEDIA:
 1627         case SIOCSIFMEDIA:
 1628                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 1629                 break;
 1630         case SIOCSIFCAP:
 1631                 HME_LOCK(sc);
 1632                 ifp->if_capenable = ifr->ifr_reqcap;
 1633                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1634                         ifp->if_hwassist = sc->sc_csum_features;
 1635                 else
 1636                         ifp->if_hwassist = 0;
 1637                 HME_UNLOCK(sc);
 1638                 break;
 1639         default:
 1640                 error = ether_ioctl(ifp, cmd, data);
 1641                 break;
 1642         }
 1643 
 1644         return (error);
 1645 }
 1646 
 1647 /*
 1648  * Set up the logical address filter.
 1649  */
 1650 static void
 1651 hme_setladrf(struct hme_softc *sc, int reenable)
 1652 {
 1653         struct ifnet *ifp = sc->sc_ifp;
 1654         struct ifmultiaddr *inm;
 1655         u_int32_t crc;
 1656         u_int32_t hash[4];
 1657         u_int32_t macc;
 1658 
 1659         HME_LOCK_ASSERT(sc, MA_OWNED);
 1660         /* Clear the hash table. */
 1661         hash[3] = hash[2] = hash[1] = hash[0] = 0;
 1662 
 1663         /* Get the current RX configuration. */
 1664         macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1665 
 1666         /*
 1667          * Turn off promiscuous mode, promiscuous group mode (all multicast),
 1668          * and hash filter.  Depending on the case, the right bit will be
 1669          * enabled.
 1670          */
 1671         macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);
 1672 
 1673         /*
 1674          * Disable the receiver while changing its state, as the documentation
 1675          * mandates.
 1676          * We must then wait until the bit clears in the register. This should
 1677          * take at most 3.5ms.
 1678          */
 1679         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1680             HME_MAC_RXCFG_ENABLE, 0))
 1681                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 1682         /* Disable the hash filter before writing to the filter registers. */
 1683         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1684             HME_MAC_RXCFG_HENABLE, 0))
 1685                 device_printf(sc->sc_dev, "cannot disable hash filter\n");
 1686 
 1687         /* Make the RX MAC really SIMPLEX. */
 1688         macc |= HME_MAC_RXCFG_ME;
 1689         if (reenable)
 1690                 macc |= HME_MAC_RXCFG_ENABLE;
 1691         else
 1692                 macc &= ~HME_MAC_RXCFG_ENABLE;
 1693 
 1694         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 1695                 macc |= HME_MAC_RXCFG_PMISC;
 1696                 goto chipit;
 1697         }
 1698         if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
 1699                 macc |= HME_MAC_RXCFG_PGRP;
 1700                 goto chipit;
 1701         }
 1702 
 1703         macc |= HME_MAC_RXCFG_HENABLE;
 1704 
 1705         /*
 1706          * Set up multicast address filter by passing all multicast addresses
 1707          * through a crc generator, and then using the high order 6 bits as an
 1708          * index into the 64 bit logical address filter.  The two high order
 1709          * bits select the word, while the remaining four bits select the bit
 1710          * within the word.
 1711          */
 1712 
 1713         IF_ADDR_LOCK(ifp);
 1714         TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
 1715                 if (inm->ifma_addr->sa_family != AF_LINK)
 1716                         continue;
 1717                 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1718                     inm->ifma_addr), ETHER_ADDR_LEN);
 1719 
 1720                 /* Just want the 6 most significant bits. */
 1721                 crc >>= 26;
 1722 
 1723                 /* Set the corresponding bit in the filter. */
 1724                 hash[crc >> 4] |= 1 << (crc & 0xf);
 1725         }
 1726         IF_ADDR_UNLOCK(ifp);
 1727 
 1728 chipit:
 1729         /* Now load the hash table into the chip */
 1730         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 1731         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 1732         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 1733         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 1734         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 1735             macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
 1736             HME_MAC_RXCFG_ME)))
 1737                 device_printf(sc->sc_dev, "cannot configure RX MAC\n");
 1738 }
