
FreeBSD/Linux Kernel Cross Reference
sys/dev/hme/if_hme.c


    1 /*-
    2  * Copyright (c) 1999 The NetBSD Foundation, Inc.
    3  * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software contributed to The NetBSD Foundation
    7  * by Paul Kranenburg.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *        This product includes software developed by the NetBSD
   20  *        Foundation, Inc. and its contributors.
   21  * 4. Neither the name of The NetBSD Foundation nor the names of its
   22  *    contributors may be used to endorse or promote products derived
   23  *    from this software without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  *
   37  *      from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD: releng/8.3/sys/dev/hme/if_hme.c 224318 2011-07-25 14:46:24Z marius $");
   42 
   43 /*
   44  * HME Ethernet module driver.
   45  *
   46  * The HME is e.g. part of the PCIO PCI multi function device.
   47  * It supports TX gathering and TX and RX checksum offloading.
   48  * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
    49  * for this offset: mbuf clusters usually start on 2^11 byte boundaries, and
    50  * skipping 2 bytes makes the header following the ethernet header land on a
    51  * natural boundary, so this wastes the least space in the most common case.
   52  *
   53  * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
   54  * maximum packet size (this is not verified). Buffers starting on odd
   55  * boundaries must be mapped so that the burst can start on a natural boundary.
   56  *
    57  * STP2002QFP-UG says the Ethernet hardware supports TCP checksum offloading.
    58  * The same technique also works for UDP datagrams, but the hardware does not
    59  * compensate the checksum for UDP, which can yield a transmitted checksum of
    60  * 0x0 ("no checksum" for UDP). As a safeguard, UDP checksum offload is disabled
    61  * by default; it can be re-enabled with the link0 option of ifconfig(8).
   62  */
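/*
 * For illustration (this is implied by the comment above rather than spelled
 * out): with the usual 14 byte Ethernet header, an RX buffer that starts
 * 2 bytes past a 16 byte boundary places the following IP header at
 * 2 + 14 = 16 bytes, i.e. on a naturally aligned boundary.
 */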
   63 #define HME_CSUM_FEATURES       (CSUM_TCP)
   64 #if 0
   65 #define HMEDEBUG
   66 #endif
   67 #define KTR_HME         KTR_CT2         /* XXX */
   68 
   69 #include <sys/param.h>
   70 #include <sys/systm.h>
   71 #include <sys/bus.h>
   72 #include <sys/endian.h>
   73 #include <sys/kernel.h>
   74 #include <sys/module.h>
   75 #include <sys/ktr.h>
   76 #include <sys/mbuf.h>
   77 #include <sys/malloc.h>
   78 #include <sys/socket.h>
   79 #include <sys/sockio.h>
   80 
   81 #include <net/bpf.h>
   82 #include <net/ethernet.h>
   83 #include <net/if.h>
   84 #include <net/if_arp.h>
   85 #include <net/if_dl.h>
   86 #include <net/if_media.h>
   87 #include <net/if_types.h>
   88 #include <net/if_vlan_var.h>
   89 
   90 #include <netinet/in.h>
   91 #include <netinet/in_systm.h>
   92 #include <netinet/ip.h>
   93 #include <netinet/tcp.h>
   94 #include <netinet/udp.h>
   95 
   96 #include <dev/mii/mii.h>
   97 #include <dev/mii/miivar.h>
   98 
   99 #include <machine/bus.h>
  100 
  101 #include <dev/hme/if_hmereg.h>
  102 #include <dev/hme/if_hmevar.h>
  103 
  104 CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
  105 CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);
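/*
 * These constraints match how the rings are programmed in hme_init_locked():
 * HME_ETXI_RSIZE is written as HME_NTXDESC / 16 - 1, i.e. in units of 16
 * descriptors, and HME_ERXI_CFG can only encode RX ring sizes of 32, 64,
 * 128 or 256.
 */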
  106 
  107 static void     hme_start(struct ifnet *);
  108 static void     hme_start_locked(struct ifnet *);
  109 static void     hme_stop(struct hme_softc *);
  110 static int      hme_ioctl(struct ifnet *, u_long, caddr_t);
  111 static void     hme_tick(void *);
  112 static int      hme_watchdog(struct hme_softc *);
  113 static void     hme_init(void *);
  114 static void     hme_init_locked(struct hme_softc *);
  115 static int      hme_add_rxbuf(struct hme_softc *, unsigned int, int);
  116 static int      hme_meminit(struct hme_softc *);
  117 static int      hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
  118     u_int32_t, u_int32_t);
  119 static void     hme_mifinit(struct hme_softc *);
  120 static void     hme_setladrf(struct hme_softc *, int);
  121 
  122 static int      hme_mediachange(struct ifnet *);
  123 static int      hme_mediachange_locked(struct hme_softc *);
  124 static void     hme_mediastatus(struct ifnet *, struct ifmediareq *);
  125 
  126 static int      hme_load_txmbuf(struct hme_softc *, struct mbuf **);
  127 static void     hme_read(struct hme_softc *, int, int, u_int32_t);
  128 static void     hme_eint(struct hme_softc *, u_int);
  129 static void     hme_rint(struct hme_softc *);
  130 static void     hme_tint(struct hme_softc *);
  131 static void     hme_rxcksum(struct mbuf *, u_int32_t);
  132 
  133 static void     hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
  134 
  135 devclass_t hme_devclass;
  136 
  137 static int hme_nerr;
  138 
  139 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
  140 MODULE_DEPEND(hme, miibus, 1, 1, 1);
  141 
  142 #define HME_SPC_READ_4(spc, sc, offs) \
  143         bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  144             (offs))
  145 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
  146         bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  147             (offs), (v))
  148 #define HME_SPC_BARRIER(spc, sc, offs, l, f) \
  149         bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
  150             (offs), (l), (f))
  151 
  152 #define HME_SEB_READ_4(sc, offs)        HME_SPC_READ_4(seb, (sc), (offs))
  153 #define HME_SEB_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(seb, (sc), (offs), (v))
  154 #define HME_SEB_BARRIER(sc, offs, l, f) \
  155         HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
  156 #define HME_ERX_READ_4(sc, offs)        HME_SPC_READ_4(erx, (sc), (offs))
  157 #define HME_ERX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(erx, (sc), (offs), (v))
  158 #define HME_ERX_BARRIER(sc, offs, l, f) \
  159         HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
  160 #define HME_ETX_READ_4(sc, offs)        HME_SPC_READ_4(etx, (sc), (offs))
  161 #define HME_ETX_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(etx, (sc), (offs), (v))
  162 #define HME_ETX_BARRIER(sc, offs, l, f) \
  163         HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
  164 #define HME_MAC_READ_4(sc, offs)        HME_SPC_READ_4(mac, (sc), (offs))
  165 #define HME_MAC_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mac, (sc), (offs), (v))
  166 #define HME_MAC_BARRIER(sc, offs, l, f) \
  167         HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
  168 #define HME_MIF_READ_4(sc, offs)        HME_SPC_READ_4(mif, (sc), (offs))
  169 #define HME_MIF_WRITE_4(sc, offs, v)    HME_SPC_WRITE_4(mif, (sc), (offs), (v))
  170 #define HME_MIF_BARRIER(sc, offs, l, f) \
  171         HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))
  172 
  173 #define HME_MAXERR      5
  174 #define HME_WHINE(dev, ...) do {                                        \
  175         if (hme_nerr++ < HME_MAXERR)                                    \
  176                 device_printf(dev, __VA_ARGS__);                        \
  177         if (hme_nerr == HME_MAXERR) {                                   \
  178                 device_printf(dev, "too many errors; not reporting "    \
  179                     "any more\n");                                      \
  180         }                                                               \
  181 } while(0)
  182 
  183 /* Support oversized VLAN frames. */
  184 #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
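/* That is ETHER_MAX_LEN (1518, including the FCS) + 4 bytes of VLAN tag = 1522. */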
  185 
  186 int
  187 hme_config(struct hme_softc *sc)
  188 {
  189         struct ifnet *ifp;
  190         struct mii_softc *child;
  191         bus_size_t size;
  192         int error, rdesc, tdesc, i;
  193 
  194         ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
  195         if (ifp == NULL)
  196                 return (ENOSPC);
  197 
  198         /*
  199          * HME common initialization.
  200          *
  201          * hme_softc fields that must be initialized by the front-end:
  202          *
  203          * the DMA bus tag:
  204          *      sc_dmatag
  205          *
   206          * the bus handles, tags and offsets (split for SBus compatibility):
  207          *      sc_seb{t,h,o}   (Shared Ethernet Block registers)
  208          *      sc_erx{t,h,o}   (Receiver Unit registers)
  209          *      sc_etx{t,h,o}   (Transmitter Unit registers)
  210          *      sc_mac{t,h,o}   (MAC registers)
  211          *      sc_mif{t,h,o}   (Management Interface registers)
  212          *
  213          * the maximum bus burst size:
  214          *      sc_burst
  215          *
  216          */
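        /*
         * A rough sketch of how a bus front-end drives this (the real code
         * lives in if_hme_pci.c and if_hme_sbus.c; resource allocation, the
         * burst size probe and all error handling are omitted, and irq_res
         * and intrhand are illustrative local names):
         *
         *      sc->sc_dev = dev;
         *      sc->sc_burst = 64;                      (or as probed)
         *      ... map the register banks and fill in sc_seb*, sc_erx*,
         *          sc_etx*, sc_mac* and sc_mif*, and read the MAC address
         *          into sc_enaddr ...
         *      if ((error = hme_config(sc)) != 0)
         *              return (error);
         *      bus_setup_intr(dev, irq_res, INTR_TYPE_NET | INTR_MPSAFE,
         *          NULL, hme_intr, sc, &intrhand);
         */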
  217 
  218         callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);
  219 
  220         /* Make sure the chip is stopped. */
  221         HME_LOCK(sc);
  222         hme_stop(sc);
  223         HME_UNLOCK(sc);
  224 
  225         error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
  226             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  227             BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
  228             NULL, NULL, &sc->sc_pdmatag);
  229         if (error)
  230                 goto fail_ifnet;
  231 
  232         /*
  233          * Create control, RX and TX mbuf DMA tags.
  234          * Buffer descriptors must be aligned on a 2048 byte boundary;
  235          * take this into account when calculating the size. Note that
  236          * the maximum number of descriptors (256) occupies 2048 bytes,
  237          * so we allocate that much regardless of HME_N*DESC.
  238          */
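        /*
         * Where the 4096 below comes from: each descriptor occupies
         * 2048 / 256 = 8 bytes (HME_XD_SIZE), so even the largest ring fits
         * in 2048 bytes, and hme_meminit() carves the buffer into one
         * 2048 byte aligned slot for the TX ring and one for the RX ring:
         *
         *      size = 2048 (TX ring slot) + 2048 (RX ring slot) = 4096
         */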
  239         size = 4096;
  240         error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
  241             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
  242             1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
  243         if (error)
  244                 goto fail_ptag;
  245 
  246         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  247             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
  248             1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
  249         if (error)
  250                 goto fail_ctag;
  251 
  252         error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
  253             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
  254             MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
  255             NULL, NULL, &sc->sc_tdmatag);
  256         if (error)
  257                 goto fail_rtag;
  258 
  259         /* Allocate the control DMA buffer. */
  260         error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
  261             BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
  262         if (error != 0) {
  263                 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
  264                 goto fail_ttag;
  265         }
  266 
  267         /* Load the control DMA buffer. */
  268         sc->sc_rb.rb_dmabase = 0;
  269         if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
  270             sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
  271             sc->sc_rb.rb_dmabase == 0) {
  272                 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
  273                     error);
  274                 goto fail_free;
  275         }
  276         CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
  277             sc->sc_rb.rb_dmabase);
  278 
  279         /*
   280          * Prepare the RX descriptors. rdesc serves as a marker for the last
  281          * processed descriptor and may be used later on.
  282          */
  283         for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
  284                 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
  285                 error = bus_dmamap_create(sc->sc_rdmatag, 0,
  286                     &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
  287                 if (error != 0)
  288                         goto fail_rxdesc;
  289         }
  290         error = bus_dmamap_create(sc->sc_rdmatag, 0,
  291             &sc->sc_rb.rb_spare_dmamap);
  292         if (error != 0)
  293                 goto fail_rxdesc;
  294         /* Same for the TX descs. */
  295         for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
  296                 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
  297                 error = bus_dmamap_create(sc->sc_tdmatag, 0,
  298                     &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
  299                 if (error != 0)
  300                         goto fail_txdesc;
  301         }
  302 
  303         sc->sc_csum_features = HME_CSUM_FEATURES;
  304         /* Initialize ifnet structure. */
  305         ifp->if_softc = sc;
  306         if_initname(ifp, device_get_name(sc->sc_dev),
  307             device_get_unit(sc->sc_dev));
  308         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
  309         ifp->if_start = hme_start;
  310         ifp->if_ioctl = hme_ioctl;
  311         ifp->if_init = hme_init;
  312         IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
  313         ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
  314         IFQ_SET_READY(&ifp->if_snd);
  315 
  316         hme_mifinit(sc);
  317 
  318         /*
   319          * DP83840A PHYs used with HME chips don't properly advertise their
   320          * media capabilities themselves, so force mii_phy_setmedia() to
   321          * write the ANAR according to the BMSR.
  322          */
  323         error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
  324             hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_EXTERNAL,
  325             MII_OFFSET_ANY, MIIF_FORCEANEG);
  326         i = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
  327             hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_INTERNAL,
  328             MII_OFFSET_ANY, MIIF_FORCEANEG);
  329         if (error != 0 && i != 0) {
  330                 error = ENXIO;
  331                 device_printf(sc->sc_dev, "attaching PHYs failed\n");
  332                 goto fail_rxdesc;
  333         }
  334         sc->sc_mii = device_get_softc(sc->sc_miibus);
  335 
  336         /*
  337          * Walk along the list of attached MII devices and
  338          * establish an `MII instance' to `PHY number'
  339          * mapping. We'll use this mapping to enable the MII
  340          * drivers of the external transceiver according to
  341          * the currently selected media.
  342          */
  343         sc->sc_phys[0] = sc->sc_phys[1] = -1;
  344         LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
  345                 /*
  346                  * Note: we support just two PHYs: the built-in
  347                  * internal device and an external on the MII
  348                  * connector.
  349                  */
  350                 if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
  351                     child->mii_phy != HME_PHYAD_INTERNAL) ||
  352                     child->mii_inst > 1) {
  353                         device_printf(sc->sc_dev, "cannot accommodate "
  354                             "MII device %s at phy %d, instance %d\n",
  355                             device_get_name(child->mii_dev),
  356                             child->mii_phy, child->mii_inst);
  357                         continue;
  358                 }
  359 
  360                 sc->sc_phys[child->mii_inst] = child->mii_phy;
  361         }
  362 
  363         /* Attach the interface. */
  364         ether_ifattach(ifp, sc->sc_enaddr);
  365 
  366         /*
  367          * Tell the upper layer(s) we support long frames/checksum offloads.
  368          */
  369         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
  370         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  371         ifp->if_hwassist |= sc->sc_csum_features;
  372         ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
  373         return (0);
  374 
  375 fail_txdesc:
  376         for (i = 0; i < tdesc; i++) {
  377                 bus_dmamap_destroy(sc->sc_tdmatag,
  378                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  379         }
  380         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  381 fail_rxdesc:
  382         for (i = 0; i < rdesc; i++) {
  383                 bus_dmamap_destroy(sc->sc_rdmatag,
  384                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  385         }
  386         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  387 fail_free:
  388         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  389 fail_ttag:
  390         bus_dma_tag_destroy(sc->sc_tdmatag);
  391 fail_rtag:
  392         bus_dma_tag_destroy(sc->sc_rdmatag);
  393 fail_ctag:
  394         bus_dma_tag_destroy(sc->sc_cdmatag);
  395 fail_ptag:
  396         bus_dma_tag_destroy(sc->sc_pdmatag);
  397 fail_ifnet:
  398         if_free(ifp);
  399         return (error);
  400 }
  401 
  402 void
  403 hme_detach(struct hme_softc *sc)
  404 {
  405         struct ifnet *ifp = sc->sc_ifp;
  406         int i;
  407 
  408         HME_LOCK(sc);
  409         hme_stop(sc);
  410         HME_UNLOCK(sc);
  411         callout_drain(&sc->sc_tick_ch);
  412         ether_ifdetach(ifp);
  413         if_free(ifp);
  414         device_delete_child(sc->sc_dev, sc->sc_miibus);
  415 
  416         for (i = 0; i < HME_NTXQ; i++) {
  417                 bus_dmamap_destroy(sc->sc_tdmatag,
  418                     sc->sc_rb.rb_txdesc[i].htx_dmamap);
  419         }
  420         bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
  421         for (i = 0; i < HME_NRXDESC; i++) {
  422                 bus_dmamap_destroy(sc->sc_rdmatag,
  423                     sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
  424         }
  425         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  426             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
  427         bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
  428         bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
  429         bus_dma_tag_destroy(sc->sc_tdmatag);
  430         bus_dma_tag_destroy(sc->sc_rdmatag);
  431         bus_dma_tag_destroy(sc->sc_cdmatag);
  432         bus_dma_tag_destroy(sc->sc_pdmatag);
  433 }
  434 
  435 void
  436 hme_suspend(struct hme_softc *sc)
  437 {
  438 
  439         HME_LOCK(sc);
  440         hme_stop(sc);
  441         HME_UNLOCK(sc);
  442 }
  443 
  444 void
  445 hme_resume(struct hme_softc *sc)
  446 {
  447         struct ifnet *ifp = sc->sc_ifp;
  448 
  449         HME_LOCK(sc);
  450         if ((ifp->if_flags & IFF_UP) != 0)
  451                 hme_init_locked(sc);
  452         HME_UNLOCK(sc);
  453 }
  454 
  455 static void
  456 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
  457 {
  458         struct hme_softc *sc = (struct hme_softc *)xsc;
  459 
  460         if (error != 0)
  461                 return;
  462         KASSERT(nsegs == 1,
  463             ("%s: too many DMA segments (%d)", __func__, nsegs));
  464         sc->sc_rb.rb_dmabase = segs[0].ds_addr;
  465 }
  466 
  467 static void
  468 hme_tick(void *arg)
  469 {
  470         struct hme_softc *sc = arg;
  471         struct ifnet *ifp;
  472 
  473         HME_LOCK_ASSERT(sc, MA_OWNED);
  474 
  475         ifp = sc->sc_ifp;
  476         /*
  477          * Unload collision counters
  478          */
  479         ifp->if_collisions +=
  480                 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
  481                 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
  482                 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
  483                 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
  484 
  485         /*
  486          * then clear the hardware counters.
  487          */
  488         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  489         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  490         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  491         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  492 
  493         mii_tick(sc->sc_mii);
  494 
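        /*
         * If the watchdog fired, hme_watchdog() returns EJUSTRETURN after
         * calling hme_init_locked(), which has already rearmed sc_tick_ch,
         * so don't reset the callout a second time.
         */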
  495         if (hme_watchdog(sc) == EJUSTRETURN)
  496                 return;
  497 
  498         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  499 }
  500 
  501 static void
  502 hme_stop(struct hme_softc *sc)
  503 {
  504         u_int32_t v;
  505         int n;
  506 
  507         callout_stop(&sc->sc_tick_ch);
  508         sc->sc_wdog_timer = 0;
  509         sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
  510         sc->sc_flags &= ~HME_LINK;
  511 
  512         /* Mask all interrupts */
  513         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);
  514 
  515         /* Reset transmitter and receiver */
  516         HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
  517             HME_SEB_RESET_ERX);
  518         HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
  519             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  520         for (n = 0; n < 20; n++) {
  521                 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
  522                 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
  523                         return;
  524                 DELAY(20);
  525         }
  526 
  527         device_printf(sc->sc_dev, "hme_stop: reset failed\n");
  528 }
  529 
  530 /*
  531  * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
  532  * ring for subsequent use.
  533  */
  534 static __inline void
  535 hme_discard_rxbuf(struct hme_softc *sc, int ix)
  536 {
  537 
  538         /*
  539          * Dropped a packet, reinitialize the descriptor and turn the
  540          * ownership back to the hardware.
  541          */
  542         HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
  543             ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
  544             &sc->sc_rb.rb_rxdesc[ix])));
  545 }
  546 
  547 static int
  548 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
  549 {
  550         struct hme_rxdesc *rd;
  551         struct mbuf *m;
  552         bus_dma_segment_t segs[1];
  553         bus_dmamap_t map;
  554         uintptr_t b;
  555         int a, unmap, nsegs;
  556 
  557         rd = &sc->sc_rb.rb_rxdesc[ri];
  558         unmap = rd->hrx_m != NULL;
  559         if (unmap && keepold) {
  560                 /*
  561                  * Reinitialize the descriptor flags, as they may have been
  562                  * altered by the hardware.
  563                  */
  564                 hme_discard_rxbuf(sc, ri);
  565                 return (0);
  566         }
  567         if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
  568                 return (ENOBUFS);
  569         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
  570         b = mtod(m, uintptr_t);
  571         /*
  572          * Required alignment boundary. At least 16 is needed, but since
  573          * the mapping must be done in a way that a burst can start on a
  574          * natural boundary we might need to extend this.
  575          */
  576         a = imax(HME_MINRXALIGN, sc->sc_burst);
  577         /*
   578          * Make sure the buffer is suitably aligned. The 2 byte offset is removed
  579          * when the mbuf is handed up. XXX: this ensures at least 16 byte
  580          * alignment of the header adjacent to the ethernet header, which
  581          * should be sufficient in all cases. Nevertheless, this second-guesses
  582          * ALIGN().
  583          */
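        /*
         * For example (just the roundup2() arithmetic of the m_adj() call
         * below, not a case from the manual): with sc_burst = 64 and a
         * hypothetical buffer starting at b = 0x1010,
         * roundup2(0x1010, 64) - 0x1010 = 0x1040 - 0x1010 = 0x30 bytes are
         * trimmed so DMA can start on a 64 byte boundary; for the usual
         * 2048 byte aligned cluster the adjustment is 0.
         */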
  584         m_adj(m, roundup2(b, a) - b);
  585         if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
  586             m, segs, &nsegs, 0) != 0) {
  587                 m_freem(m);
  588                 return (ENOBUFS);
  589         }
  590         /* If nsegs is wrong then the stack is corrupt. */
  591         KASSERT(nsegs == 1,
  592             ("%s: too many DMA segments (%d)", __func__, nsegs));
  593         if (unmap) {
  594                 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
  595                     BUS_DMASYNC_POSTREAD);
  596                 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
  597         }
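        /*
         * The new mbuf was loaded into the spare map above, so a mapping
         * failure never costs us the buffer already sitting in the ring;
         * now swap the spare map with this descriptor's map.
         */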
  598         map = rd->hrx_dmamap;
  599         rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
  600         sc->sc_rb.rb_spare_dmamap = map;
  601         bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
  602         HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
  603             segs[0].ds_addr);
  604         rd->hrx_m = m;
  605         HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
  606             HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
  607         return (0);
  608 }
  609 
  610 static int
  611 hme_meminit(struct hme_softc *sc)
  612 {
  613         struct hme_ring *hr = &sc->sc_rb;
  614         struct hme_txdesc *td;
  615         bus_addr_t dma;
  616         caddr_t p;
  617         unsigned int i;
  618         int error;
  619 
  620         p = hr->rb_membase;
  621         dma = hr->rb_dmabase;
  622 
  623         /*
  624          * Allocate transmit descriptors
  625          */
  626         hr->rb_txd = p;
  627         hr->rb_txddma = dma;
  628         p += HME_NTXDESC * HME_XD_SIZE;
  629         dma += HME_NTXDESC * HME_XD_SIZE;
  630         /*
  631          * We have reserved descriptor space until the next 2048 byte
  632          * boundary.
  633          */
  634         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  635         p = (caddr_t)roundup((u_long)p, 2048);
  636 
  637         /*
  638          * Allocate receive descriptors
  639          */
  640         hr->rb_rxd = p;
  641         hr->rb_rxddma = dma;
  642         p += HME_NRXDESC * HME_XD_SIZE;
  643         dma += HME_NRXDESC * HME_XD_SIZE;
  644         /* Again move forward to the next 2048 byte boundary.*/
  645         dma = (bus_addr_t)roundup((u_long)dma, 2048);
  646         p = (caddr_t)roundup((u_long)p, 2048);
  647 
  648         /*
  649          * Initialize transmit buffer descriptors
  650          */
  651         for (i = 0; i < HME_NTXDESC; i++) {
  652                 HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
  653                 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
  654         }
  655 
  656         STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
  657         STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
  658         for (i = 0; i < HME_NTXQ; i++) {
  659                 td = &sc->sc_rb.rb_txdesc[i];
  660                 if (td->htx_m != NULL) {
  661                         bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
  662                             BUS_DMASYNC_POSTWRITE);
  663                         bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
  664                         m_freem(td->htx_m);
  665                         td->htx_m = NULL;
  666                 }
  667                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
  668         }
  669 
  670         /*
  671          * Initialize receive buffer descriptors
  672          */
  673         for (i = 0; i < HME_NRXDESC; i++) {
  674                 error = hme_add_rxbuf(sc, i, 1);
  675                 if (error != 0)
  676                         return (error);
  677         }
  678 
  679         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
  680             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
  681 
  682         hr->rb_tdhead = hr->rb_tdtail = 0;
  683         hr->rb_td_nbusy = 0;
  684         hr->rb_rdtail = 0;
  685         CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
  686             hr->rb_txddma);
  687         CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
  688             hr->rb_rxddma);
  689         CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
  690             *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
  691         CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
  692             *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
  693         return (0);
  694 }
  695 
  696 static int
  697 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
  698     u_int32_t clr, u_int32_t set)
  699 {
  700         int i = 0;
  701 
  702         val &= ~clr;
  703         val |= set;
  704         HME_MAC_WRITE_4(sc, reg, val);
  705         HME_MAC_BARRIER(sc, reg, 4,
  706             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
  707         if (clr == 0 && set == 0)
  708                 return (1);     /* just write, no bits to wait for */
  709         do {
  710                 DELAY(100);
  711                 i++;
  712                 val = HME_MAC_READ_4(sc, reg);
  713                 if (i > 40) {
  714                         /* After 3.5ms, we should have been done. */
  715                         device_printf(sc->sc_dev, "timeout while writing to "
  716                             "MAC configuration register\n");
  717                         return (0);
  718                 }
  719         } while ((val & clr) != 0 && (val & set) != set);
  720         return (1);
  721 }
  722 
  723 /*
  724  * Initialization of interface; set up initialization block
  725  * and transmit/receive descriptor rings.
  726  */
  727 static void
  728 hme_init(void *xsc)
  729 {
  730         struct hme_softc *sc = (struct hme_softc *)xsc;
  731 
  732         HME_LOCK(sc);
  733         hme_init_locked(sc);
  734         HME_UNLOCK(sc);
  735 }
  736 
  737 static void
  738 hme_init_locked(struct hme_softc *sc)
  739 {
  740         struct ifnet *ifp = sc->sc_ifp;
  741         u_int8_t *ea;
  742         u_int32_t n, v;
  743 
  744         HME_LOCK_ASSERT(sc, MA_OWNED);
  745         /*
  746          * Initialization sequence. The numbered steps below correspond
  747          * to the sequence outlined in section 6.3.5.1 in the Ethernet
  748          * Channel Engine manual (part of the PCIO manual).
  749          * See also the STP2002-STQ document from Sun Microsystems.
  750          */
  751 
  752         /* step 1 & 2. Reset the Ethernet Channel */
  753         hme_stop(sc);
  754 
  755         /* Re-initialize the MIF */
  756         hme_mifinit(sc);
  757 
  758 #if 0
  759         /* Mask all MIF interrupts, just in case */
  760         HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
  761 #endif
  762 
  763         /* step 3. Setup data structures in host memory */
  764         if (hme_meminit(sc) != 0) {
  765                 device_printf(sc->sc_dev, "out of buffers; init aborted.");
  766                 return;
  767         }
  768 
  769         /* step 4. TX MAC registers & counters */
  770         HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
  771         HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
  772         HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
  773         HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
  774         HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
  775 
  776         /* Load station MAC address */
  777         ea = IF_LLADDR(ifp);
  778         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
  779         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
  780         HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
  781 
  782         /*
  783          * Init seed for backoff
  784          * (source suggested by manual: low 10 bits of MAC address)
  785          */
  786         v = ((ea[4] << 8) | ea[5]) & 0x3fff;
  787         HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
  788 
  789         /* Note: Accepting power-on default for other MAC registers here.. */
  790 
  791         /* step 5. RX MAC registers & counters */
  792         hme_setladrf(sc, 0);
  793 
  794         /* step 6 & 7. Program Descriptor Ring Base Addresses */
  795         HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
  796         /* Transmit Descriptor ring size: in increments of 16 */
  797         HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
  798 
  799         HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
  800         HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
  801 
  802         /* step 8. Global Configuration & Interrupt Mask */
  803         HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
  804             ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
  805                 HME_SEB_STAT_HOSTTOTX |
  806                 HME_SEB_STAT_RXTOHOST |
  807                 HME_SEB_STAT_TXALL |
  808                 HME_SEB_STAT_TXPERR |
  809                 HME_SEB_STAT_RCNTEXP |
  810                 HME_SEB_STAT_ALL_ERRORS ));
  811 
  812         switch (sc->sc_burst) {
  813         default:
  814                 v = 0;
  815                 break;
  816         case 16:
  817                 v = HME_SEB_CFG_BURST16;
  818                 break;
  819         case 32:
  820                 v = HME_SEB_CFG_BURST32;
  821                 break;
  822         case 64:
  823                 v = HME_SEB_CFG_BURST64;
  824                 break;
  825         }
  826         /*
   827          * Blindly setting 64-bit transfers may hang PCI cards (Cheerio?).
   828          * Allowing 64-bit transfers also breaks TX checksum offload. It is
   829          * not known whether this is due to a hardware bug or to the
   830          * driver's DMA scheme.
  831          *
   832          * if ((sc->sc_flags & HME_PCI) == 0)
  833          *      v |= HME_SEB_CFG_64BIT;
  834          */
  835         HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
  836 
  837         /* step 9. ETX Configuration: use mostly default values */
  838 
  839         /* Enable DMA */
  840         v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
  841         v |= HME_ETX_CFG_DMAENABLE;
  842         HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
  843 
  844         /* step 10. ERX Configuration */
  845         v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
  846 
  847         /* Encode Receive Descriptor ring size: four possible values */
  848         v &= ~HME_ERX_CFG_RINGSIZEMSK;
  849         switch (HME_NRXDESC) {
  850         case 32:
  851                 v |= HME_ERX_CFG_RINGSIZE32;
  852                 break;
  853         case 64:
  854                 v |= HME_ERX_CFG_RINGSIZE64;
  855                 break;
  856         case 128:
  857                 v |= HME_ERX_CFG_RINGSIZE128;
  858                 break;
  859         case 256:
  860                 v |= HME_ERX_CFG_RINGSIZE256;
  861                 break;
  862         default:
  863                 printf("hme: invalid Receive Descriptor ring size\n");
  864                 break;
  865         }
  866 
  867         /* Enable DMA, fix RX first byte offset. */
  868         v &= ~HME_ERX_CFG_FBO_MASK;
  869         v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
  870         /* RX TCP/UDP checksum offset */
  871         n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
  872         n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
  873         v |= n;
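        /*
         * With a 14 byte Ethernet header and an option-less 20 byte IP header
         * this is (14 + 20) / 2 = 17 half-words, i.e. the hardware starts
         * summing at byte offset 34, the beginning of the TCP/UDP header;
         * frames carrying IP options are fixed up later in hme_rxcksum().
         */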
  874         CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
  875         HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
  876 
  877         /* step 11. XIF Configuration */
  878         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
  879         v |= HME_MAC_XIF_OE;
  880         CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
  881         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
  882 
  883         /* step 12. RX_MAC Configuration Register */
  884         v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
  885         v |= HME_MAC_RXCFG_ENABLE;
  886         v &= ~(HME_MAC_RXCFG_DCRCS);
  887         CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
  888         HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
  889 
  890         /* step 13. TX_MAC Configuration Register */
  891         v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
  892         v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
  893         CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
  894         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
  895 
  896         /* step 14. Issue Transmit Pending command */
  897 
  898 #ifdef HMEDEBUG
  899         /* Debug: double-check. */
  900         CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
  901             "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
  902             HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
  903             HME_ERX_READ_4(sc, HME_ERXI_RING),
  904             HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
  905         CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
  906             HME_SEB_READ_4(sc, HME_SEBI_IMASK),
  907             HME_ERX_READ_4(sc, HME_ERXI_CFG),
  908             HME_ETX_READ_4(sc, HME_ETXI_CFG));
  909         CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
  910             HME_MAC_READ_4(sc, HME_MACI_RXCFG),
  911             HME_MAC_READ_4(sc, HME_MACI_TXCFG));
  912 #endif
  913 
  914         ifp->if_drv_flags |= IFF_DRV_RUNNING;
  915         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
  916 
  917         /* Set the current media. */
  918         hme_mediachange_locked(sc);
  919 
  920         /* Start the one second timer. */
  921         sc->sc_wdog_timer = 0;
  922         callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
  923 }
  924 
  925 /*
  926  * Routine to DMA map an mbuf chain, set up the descriptor rings
  927  * accordingly and start the transmission.
   929  * Returns 0 on success, ENOBUFS if there were not enough free descriptors
   930  * to map the packet, or another errno otherwise.
  930  *
  931  * XXX: this relies on the fact that segments returned by
  932  * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
  933  * boundary on (i.e. potentially before ds_addr) to the first
  934  * boundary beyond the end.  This is usually a safe assumption to
  935  * make, but is not documented.
  936  */
  937 static int
  938 hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
  939 {
  940         bus_dma_segment_t segs[HME_NTXSEGS];
  941         struct hme_txdesc *htx;
  942         struct ip *ip;
  943         struct mbuf *m;
  944         caddr_t txd;
  945         int error, i, nsegs, pci, ri, si;
  946         uint32_t cflags, flags;
  947 
  948         if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
  949                 return (ENOBUFS);
  950 
  951         cflags = 0;
  952         if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
  953                 if (M_WRITABLE(*m0) == 0) {
  954                         m = m_dup(*m0, M_DONTWAIT);
  955                         m_freem(*m0);
  956                         *m0 = m;
  957                         if (m == NULL)
  958                                 return (ENOBUFS);
  959                 }
  960                 i = sizeof(struct ether_header);
  961                 m = m_pullup(*m0, i + sizeof(struct ip));
  962                 if (m == NULL) {
  963                         *m0 = NULL;
  964                         return (ENOBUFS);
  965                 }
  966                 ip = (struct ip *)(mtod(m, caddr_t) + i);
  967                 i += (ip->ip_hl << 2);
  968                 cflags = i << HME_XD_TXCKSUM_SSHIFT |
  969                     ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
  970                     HME_XD_TXCKSUM;
  971                 *m0 = m;
  972         }
  973 
  974         error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
  975             *m0, segs, &nsegs, 0);
  976         if (error == EFBIG) {
  977                 m = m_collapse(*m0, M_DONTWAIT, HME_NTXSEGS);
  978                 if (m == NULL) {
  979                         m_freem(*m0);
  980                         *m0 = NULL;
  981                         return (ENOMEM);
  982                 }
  983                 *m0 = m;
  984                 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
  985                     *m0, segs, &nsegs, 0);
  986                 if (error != 0) {
  987                         m_freem(*m0);
  988                         *m0 = NULL;
  989                         return (error);
  990                 }
  991         } else if (error != 0)
  992                 return (error);
  993         /* If nsegs is wrong then the stack is corrupt. */
  994         KASSERT(nsegs <= HME_NTXSEGS,
  995             ("%s: too many DMA segments (%d)", __func__, nsegs));
  996         if (nsegs == 0) {
  997                 m_freem(*m0);
  998                 *m0 = NULL;
  999                 return (EIO);
 1000         }
 1001         if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
 1002                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1003                 /* Retry with m_collapse(9)? */
 1004                 return (ENOBUFS);
 1005         }
 1006         bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
 1007 
 1008         si = ri = sc->sc_rb.rb_tdhead;
 1009         txd = sc->sc_rb.rb_txd;
 1010         pci = sc->sc_flags & HME_PCI;
 1011         CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
 1012             HME_XD_GETFLAGS(pci, txd, ri));
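        /*
         * Sketch of the flags the loop below produces for a packet mapped to
         * three segments (each entry also carries its encoded segment size):
         *
         *      desc si + 0:    SOP | cflags            (OWN is set last)
         *      desc si + 1:    OWN | cflags
         *      desc si + 2:    OWN | cflags | EOP
         *
         * so the chip only sees a complete chain once the first descriptor's
         * OWN bit is finally flipped below.
         */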
 1013         for (i = 0; i < nsegs; i++) {
 1014                 /* Fill the ring entry. */
 1015                 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
 1016                 if (i == 0)
 1017                         flags |= HME_XD_SOP | cflags;
 1018                 else
 1019                         flags |= HME_XD_OWN | cflags;
 1020                 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
 1021                     ri, si, flags);
 1022                 HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
 1023                 HME_XD_SETFLAGS(pci, txd, ri, flags);
 1024                 sc->sc_rb.rb_td_nbusy++;
 1025                 htx->htx_lastdesc = ri;
 1026                 ri = (ri + 1) % HME_NTXDESC;
 1027         }
 1028         sc->sc_rb.rb_tdhead = ri;
 1029 
 1030         /* set EOP on the last descriptor */
 1031         ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
 1032         flags = HME_XD_GETFLAGS(pci, txd, ri);
 1033         flags |= HME_XD_EOP;
 1034         CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
 1035             flags);
 1036         HME_XD_SETFLAGS(pci, txd, ri, flags);
 1037 
  1038         /* Hand ownership of the first descriptor to the HME. */
 1039         flags = HME_XD_GETFLAGS(pci, txd, si);
 1040         flags |= HME_XD_OWN;
 1041         CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
 1042             ri, flags);
 1043         HME_XD_SETFLAGS(pci, txd, si, flags);
 1044 
 1045         STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
 1046         STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
 1047         htx->htx_m = *m0;
 1048 
 1049         /* start the transmission. */
 1050         HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
 1051 
 1052         return (0);
 1053 }
 1054 
 1055 /*
 1056  * Pass a packet to the higher levels.
 1057  */
 1058 static void
 1059 hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
 1060 {
 1061         struct ifnet *ifp = sc->sc_ifp;
 1062         struct mbuf *m;
 1063 
 1064         if (len <= sizeof(struct ether_header) ||
 1065             len > HME_MAX_FRAMESIZE) {
 1066 #ifdef HMEDEBUG
 1067                 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
 1068                     len);
 1069 #endif
 1070                 ifp->if_ierrors++;
 1071                 hme_discard_rxbuf(sc, ix);
 1072                 return;
 1073         }
 1074 
 1075         m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
 1076         CTR1(KTR_HME, "hme_read: len %d", len);
 1077 
 1078         if (hme_add_rxbuf(sc, ix, 0) != 0) {
 1079                 /*
 1080                  * hme_add_rxbuf will leave the old buffer in the ring until
 1081                  * it is sure that a new buffer can be mapped. If it can not,
 1082                  * drop the packet, but leave the interface up.
 1083                  */
 1084                 ifp->if_iqdrops++;
 1085                 hme_discard_rxbuf(sc, ix);
 1086                 return;
 1087         }
 1088 
 1089         ifp->if_ipackets++;
 1090 
 1091         m->m_pkthdr.rcvif = ifp;
 1092         m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
 1093         m_adj(m, HME_RXOFFS);
 1094         /* RX TCP/UDP checksum */
 1095         if (ifp->if_capenable & IFCAP_RXCSUM)
 1096                 hme_rxcksum(m, flags);
  1097         /* Pass the packet up, dropping the driver lock across the call into the stack. */
 1098         HME_UNLOCK(sc);
 1099         (*ifp->if_input)(ifp, m);
 1100         HME_LOCK(sc);
 1101 }
 1102 
 1103 static void
 1104 hme_start(struct ifnet *ifp)
 1105 {
 1106         struct hme_softc *sc = ifp->if_softc;
 1107 
 1108         HME_LOCK(sc);
 1109         hme_start_locked(ifp);
 1110         HME_UNLOCK(sc);
 1111 }
 1112 
 1113 static void
 1114 hme_start_locked(struct ifnet *ifp)
 1115 {
 1116         struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
 1117         struct mbuf *m;
 1118         int error, enq = 0;
 1119 
 1120         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 1121             IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
 1122                 return;
 1123 
 1124         for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
 1125             sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
 1126                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
 1127                 if (m == NULL)
 1128                         break;
 1129 
 1130                 error = hme_load_txmbuf(sc, &m);
 1131                 if (error != 0) {
 1132                         if (m == NULL)
 1133                                 break;
 1134                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 1135                         IFQ_DRV_PREPEND(&ifp->if_snd, m);
 1136                         break;
 1137                 }
 1138                 enq++;
 1139                 BPF_MTAP(ifp, m);
 1140         }
 1141 
 1142         if (enq > 0) {
 1143                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1144                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1145                 sc->sc_wdog_timer = 5;
 1146         }
 1147 }
 1148 
 1149 /*
 1150  * Transmit interrupt.
 1151  */
 1152 static void
 1153 hme_tint(struct hme_softc *sc)
 1154 {
 1155         caddr_t txd;
 1156         struct ifnet *ifp = sc->sc_ifp;
 1157         struct hme_txdesc *htx;
 1158         unsigned int ri, txflags;
 1159 
 1160         txd = sc->sc_rb.rb_txd;
 1161         htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1162         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1163         /* Fetch current position in the transmit ring */
 1164         for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
 1165                 if (sc->sc_rb.rb_td_nbusy <= 0) {
 1166                         CTR0(KTR_HME, "hme_tint: not busy!");
 1167                         break;
 1168                 }
 1169 
 1170                 txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
 1171                 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
 1172 
 1173                 if ((txflags & HME_XD_OWN) != 0)
 1174                         break;
 1175 
 1176                 CTR0(KTR_HME, "hme_tint: not owned");
 1177                 --sc->sc_rb.rb_td_nbusy;
 1178                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 1179 
 1180                 /* Complete packet transmitted? */
 1181                 if ((txflags & HME_XD_EOP) == 0)
 1182                         continue;
 1183 
 1184                 KASSERT(htx->htx_lastdesc == ri,
 1185                     ("%s: ring indices skewed: %d != %d!",
 1186                     __func__, htx->htx_lastdesc, ri));
 1187                 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
 1188                     BUS_DMASYNC_POSTWRITE);
 1189                 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
 1190 
 1191                 ifp->if_opackets++;
 1192                 m_freem(htx->htx_m);
 1193                 htx->htx_m = NULL;
 1194                 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
 1195                 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
 1196                 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 1197         }
 1198         sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
 1199 
 1200         /* Update ring */
 1201         sc->sc_rb.rb_tdtail = ri;
 1202 
 1203         hme_start_locked(ifp);
 1204 }
 1205 
 1206 /*
 1207  * RX TCP/UDP checksum
 1208  */
 1209 static void
 1210 hme_rxcksum(struct mbuf *m, u_int32_t flags)
 1211 {
 1212         struct ether_header *eh;
 1213         struct ip *ip;
 1214         struct udphdr *uh;
 1215         int32_t hlen, len, pktlen;
 1216         u_int16_t cksum, *opts;
 1217         u_int32_t temp32;
 1218 
 1219         pktlen = m->m_pkthdr.len;
 1220         if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
 1221                 return;
 1222         eh = mtod(m, struct ether_header *);
 1223         if (eh->ether_type != htons(ETHERTYPE_IP))
 1224                 return;
 1225         ip = (struct ip *)(eh + 1);
 1226         if (ip->ip_v != IPVERSION)
 1227                 return;
 1228 
 1229         hlen = ip->ip_hl << 2;
 1230         pktlen -= sizeof(struct ether_header);
 1231         if (hlen < sizeof(struct ip))
 1232                 return;
 1233         if (ntohs(ip->ip_len) < hlen)
 1234                 return;
 1235         if (ntohs(ip->ip_len) != pktlen)
 1236                 return;
 1237         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
 1238                 return; /* can't handle fragmented packet */
 1239 
 1240         switch (ip->ip_p) {
 1241         case IPPROTO_TCP:
 1242                 if (pktlen < (hlen + sizeof(struct tcphdr)))
 1243                         return;
 1244                 break;
 1245         case IPPROTO_UDP:
 1246                 if (pktlen < (hlen + sizeof(struct udphdr)))
 1247                         return;
 1248                 uh = (struct udphdr *)((caddr_t)ip + hlen);
 1249                 if (uh->uh_sum == 0)
 1250                         return; /* no checksum */
 1251                 break;
 1252         default:
 1253                 return;
 1254         }
 1255 
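        /*
         * The hardware sum (see the CSUMSTART setup in hme_init_locked())
         * starts right after a minimal 20 byte IP header, so for headers
         * carrying options the option half-words were included as well; the
         * loop below subtracts them back out in one's complement arithmetic,
         * folding the borrow back in after each subtraction.
         */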
 1256         cksum = ~(flags & HME_XD_RXCKSUM);
 1257         /* checksum fixup for IP options */
 1258         len = hlen - sizeof(struct ip);
 1259         if (len > 0) {
 1260                 opts = (u_int16_t *)(ip + 1);
 1261                 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
 1262                         temp32 = cksum - *opts;
 1263                         temp32 = (temp32 >> 16) + (temp32 & 65535);
 1264                         cksum = temp32 & 65535;
 1265                 }
 1266         }
 1267         m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
 1268         m->m_pkthdr.csum_data = cksum;
 1269 }
 1270 
 1271 /*
 1272  * Receive interrupt.
 1273  */
 1274 static void
 1275 hme_rint(struct hme_softc *sc)
 1276 {
 1277         caddr_t xdr = sc->sc_rb.rb_rxd;
 1278         struct ifnet *ifp = sc->sc_ifp;
 1279         unsigned int ri, len;
 1280         int progress = 0;
 1281         u_int32_t flags;
 1282 
 1283         /*
 1284          * Process all buffers with valid data.
 1285          */
 1286         bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
 1287         for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
 1288                 flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
 1289                 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
 1290                 if ((flags & HME_XD_OWN) != 0)
 1291                         break;
 1292 
 1293                 progress++;
 1294                 if ((flags & HME_XD_OFL) != 0) {
 1295                         device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
 1296                             "flags=0x%x\n", ri, flags);
 1297                         ifp->if_ierrors++;
 1298                         hme_discard_rxbuf(sc, ri);
 1299                 } else {
 1300                         len = HME_XD_DECODE_RSIZE(flags);
 1301                         hme_read(sc, ri, len, flags);
 1302                 }
 1303         }
 1304         if (progress) {
 1305                 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 1306                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 1307         }
 1308         sc->sc_rb.rb_rdtail = ri;
 1309 }
 1310 
 1311 static void
 1312 hme_eint(struct hme_softc *sc, u_int status)
 1313 {
 1314 
 1315         if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
 1316                 device_printf(sc->sc_dev, "XXXlink status changed: "
 1317                     "cfg=%#x, stat=%#x, sm=%#x\n",
 1318                     HME_MIF_READ_4(sc, HME_MIFI_CFG),
 1319                     HME_MIF_READ_4(sc, HME_MIFI_STAT),
 1320                     HME_MIF_READ_4(sc, HME_MIFI_SM));
 1321                 return;
 1322         }
 1323 
  1324         /* Check for fatal errors that need a reset to unfreeze the DMA engine. */
 1325         if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
 1326                 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 1327                 hme_init_locked(sc);
 1328         }
 1329 }
 1330 
 1331 void
 1332 hme_intr(void *v)
 1333 {
 1334         struct hme_softc *sc = (struct hme_softc *)v;
 1335         u_int32_t status;
 1336 
 1337         HME_LOCK(sc);
 1338         status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 1339         CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 1340 
 1341         if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 1342                 hme_eint(sc, status);
 1343 
 1344         if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 1345                 hme_rint(sc);
 1346 
 1347         if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 1348                 hme_tint(sc);
 1349         HME_UNLOCK(sc);
 1350 }
 1351 
 1352 static int
 1353 hme_watchdog(struct hme_softc *sc)
 1354 {
 1355         struct ifnet *ifp = sc->sc_ifp;
 1356 
 1357         HME_LOCK_ASSERT(sc, MA_OWNED);
 1358 
 1359 #ifdef HMEDEBUG
 1360         CTR1(KTR_HME, "hme_watchdog: status %x",
 1361             (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
 1362 #endif
 1363 
 1364         if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
 1365                 return (0);
 1366 
 1367         if ((sc->sc_flags & HME_LINK) != 0)
 1368                 device_printf(sc->sc_dev, "device timeout\n");
 1369         else if (bootverbose)
 1370                 device_printf(sc->sc_dev, "device timeout (no link)\n");
 1371         ++ifp->if_oerrors;
 1372 
 1373         hme_init_locked(sc);
 1374         hme_start_locked(ifp);
 1375         return (EJUSTRETURN);
 1376 }
 1377 
 1378 /*
 1379  * Initialize the MII Management Interface
 1380  */
 1381 static void
 1382 hme_mifinit(struct hme_softc *sc)
 1383 {
 1384         u_int32_t v;
 1385 
 1386         /*
 1387          * Configure the MIF in frame mode, polling disabled, internal PHY
 1388          * selected.
 1389          */
 1390         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
 1391 
 1392         /*
 1393          * If the currently selected media uses the external transceiver,
 1394          * enable its MII drivers (which basically isolates the internal
 1395          * one and vice versa).  If the current media has not been set
 1396          * yet, we default to the internal transceiver.
 1397          */
 1398         v = HME_MAC_READ_4(sc, HME_MACI_XIF);
 1399         if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
 1400             sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
 1401             HME_PHYAD_EXTERNAL)
 1402                 v |= HME_MAC_XIF_MIIENABLE;
 1403         else
 1404                 v &= ~HME_MAC_XIF_MIIENABLE;
 1405         HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
 1406 }
 1407 
 1408 /*
 1409  * MII interface
 1410  */
 1411 int
 1412 hme_mii_readreg(device_t dev, int phy, int reg)
 1413 {
 1414         struct hme_softc *sc;
 1415         int n;
 1416         u_int32_t v;
 1417 
 1418         sc = device_get_softc(dev);
 1419         /* Select the desired PHY in the MIF configuration register */
 1420         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1421         if (phy == HME_PHYAD_EXTERNAL)
 1422                 v |= HME_MIF_CFG_PHY;
 1423         else
 1424                 v &= ~HME_MIF_CFG_PHY;
 1425         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1426 
 1427         /* Construct the frame command */
 1428         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 1429             HME_MIF_FO_TAMSB |
 1430             (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 1431             (phy << HME_MIF_FO_PHYAD_SHIFT) |
 1432             (reg << HME_MIF_FO_REGAD_SHIFT);
 1433 
 1434         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1435         HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
 1436             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 1437         for (n = 0; n < 100; n++) {
 1438                 DELAY(1);
 1439                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1440                 if (v & HME_MIF_FO_TALSB)
 1441                         return (v & HME_MIF_FO_DATA);
 1442         }
 1443 
 1444         device_printf(sc->sc_dev, "mii_read timeout\n");
 1445         return (0);
 1446 }
 1447 
 1448 int
 1449 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 1450 {
 1451         struct hme_softc *sc;
 1452         int n;
 1453         u_int32_t v;
 1454 
 1455         sc = device_get_softc(dev);
 1456         /* Select the desired PHY in the MIF configuration register */
 1457         v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 1458         if (phy == HME_PHYAD_EXTERNAL)
 1459                 v |= HME_MIF_CFG_PHY;
 1460         else
 1461                 v &= ~HME_MIF_CFG_PHY;
 1462         HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 1463 
 1464         /* Construct the frame command */
 1465         v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)  |
 1466             HME_MIF_FO_TAMSB                            |
 1467             (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 1468             (phy << HME_MIF_FO_PHYAD_SHIFT)             |
 1469             (reg << HME_MIF_FO_REGAD_SHIFT)             |
 1470             (val & HME_MIF_FO_DATA);
 1471 
 1472         HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 1473         HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
 1474             BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 1475         for (n = 0; n < 100; n++) {
 1476                 DELAY(1);
 1477                 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 1478                 if (v & HME_MIF_FO_TALSB)
 1479                         return (1);
 1480         }
 1481 
 1482         device_printf(sc->sc_dev, "mii_write timeout\n");
 1483         return (0);
 1484 }
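
Both MII access routines pack a management frame into the MIF Frame/Output
register (start bits, opcode, PHY address, register address, turnaround and,
for a write, the data) and then poll until the turnaround LSB reports
completion.  The field order follows the usual IEEE 802.3 Clause 22 layout;
the sketch below packs such a frame with generic shift values, which are not
the driver's HME_MIF_FO_* constants.

#include <stdint.h>
#include <stdio.h>

/* Generic IEEE 802.3 Clause 22 management frame fields (32 bits). */
#define MII_ST_SHIFT    30      /* start of frame: 01 */
#define MII_OP_SHIFT    28      /* opcode: 10 = read, 01 = write */
#define MII_PHY_SHIFT   23      /* 5-bit PHY address */
#define MII_REG_SHIFT   18      /* 5-bit register address */
#define MII_TA_SHIFT    16      /* turnaround bits */

static uint32_t
mii_frame_write(unsigned int phy, unsigned int reg, uint16_t val)
{
        return ((1u << MII_ST_SHIFT) |          /* ST = 01 */
            (1u << MII_OP_SHIFT) |              /* OP = 01 (write) */
            ((phy & 0x1f) << MII_PHY_SHIFT) |
            ((reg & 0x1f) << MII_REG_SHIFT) |
            (2u << MII_TA_SHIFT) |              /* TA = 10 */
            val);
}

int
main(void)
{
        /* Write 0x1234 to register 0 of the PHY at address 1. */
        printf("frame word: 0x%08x\n", mii_frame_write(1, 0, 0x1234));
        return (0);
}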
 1485 
 1486 void
 1487 hme_mii_statchg(device_t dev)
 1488 {
 1489         struct hme_softc *sc;
 1490         uint32_t rxcfg, txcfg;
 1491 
 1492         sc = device_get_softc(dev);
 1493 
 1494 #ifdef HMEDEBUG
 1495         if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
 1496                 device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
 1497 #endif
 1498 
 1499         if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
 1500             IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
 1501                 sc->sc_flags |= HME_LINK;
 1502         else
 1503                 sc->sc_flags &= ~HME_LINK;
 1504 
 1505         txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 1506         if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
 1507             HME_MAC_TXCFG_ENABLE, 0))
 1508                 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
 1509         rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1510         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
 1511             HME_MAC_RXCFG_ENABLE, 0))
 1512                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 1513 
 1514         /* Set the MAC Full Duplex bit appropriately. */
 1515         if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 1516                 txcfg |= HME_MAC_TXCFG_FULLDPLX;
 1517         else
 1518                 txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
 1519         HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);
 1520 
 1521         if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1522             (sc->sc_flags & HME_LINK) != 0) {
 1523                 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
 1524                     HME_MAC_TXCFG_ENABLE))
 1525                         device_printf(sc->sc_dev, "cannot enable TX MAC\n");
 1526                 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
 1527                     HME_MAC_RXCFG_ENABLE))
 1528                         device_printf(sc->sc_dev, "cannot enable RX MAC\n");
 1529         }
 1530 }
 1531 
 1532 static int
 1533 hme_mediachange(struct ifnet *ifp)
 1534 {
 1535         struct hme_softc *sc = ifp->if_softc;
 1536         int error;
 1537 
 1538         HME_LOCK(sc);
 1539         error = hme_mediachange_locked(sc);
 1540         HME_UNLOCK(sc);
 1541         return (error);
 1542 }
 1543 
 1544 static int
 1545 hme_mediachange_locked(struct hme_softc *sc)
 1546 {
 1547         struct mii_softc *child;
 1548 
 1549         HME_LOCK_ASSERT(sc, MA_OWNED);
 1550 
 1551 #ifdef HMEDEBUG
 1552         if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
 1553                 device_printf(sc->sc_dev, "hme_mediachange_locked\n");
 1554 #endif
 1555 
 1556         hme_mifinit(sc);
 1557 
 1558         /*
 1559          * If both PHYs are present, reset them.  This is required for
 1560          * unisolating the previously isolated PHY when switching PHYs.
 1561          * As the above hme_mifinit() call will set the MII drivers in
 1562          * the XIF configuration register according to the currently
 1563          * selected media, there should be no window during which the
 1564          * data paths of both transceivers are open at the same time,
 1565          * even if the PHY device drivers use MIIF_NOISOLATE.
 1566          */
 1567         if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
 1568                 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
 1569                         mii_phy_reset(child);
 1570         return (mii_mediachg(sc->sc_mii));
 1571 }
 1572 
 1573 static void
 1574 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 1575 {
 1576         struct hme_softc *sc = ifp->if_softc;
 1577 
 1578         HME_LOCK(sc);
 1579         if ((ifp->if_flags & IFF_UP) == 0) {
 1580                 HME_UNLOCK(sc);
 1581                 return;
 1582         }
 1583 
 1584         mii_pollstat(sc->sc_mii);
 1585         ifmr->ifm_active = sc->sc_mii->mii_media_active;
 1586         ifmr->ifm_status = sc->sc_mii->mii_media_status;
 1587         HME_UNLOCK(sc);
 1588 }
 1589 
 1590 /*
 1591  * Process an ioctl request.
 1592  */
 1593 static int
 1594 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 1595 {
 1596         struct hme_softc *sc = ifp->if_softc;
 1597         struct ifreq *ifr = (struct ifreq *)data;
 1598         int error = 0;
 1599 
 1600         switch (cmd) {
 1601         case SIOCSIFFLAGS:
 1602                 HME_LOCK(sc);
 1603                 if ((ifp->if_flags & IFF_UP) != 0) {
 1604                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
 1605                             ((ifp->if_flags ^ sc->sc_ifflags) &
 1606                             (IFF_ALLMULTI | IFF_PROMISC)) != 0)
 1607                                 hme_setladrf(sc, 1);
 1608                         else
 1609                                 hme_init_locked(sc);
 1610                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 1611                         hme_stop(sc);
 1612                 if ((ifp->if_flags & IFF_LINK0) != 0)
 1613                         sc->sc_csum_features |= CSUM_UDP;
 1614                 else
 1615                         sc->sc_csum_features &= ~CSUM_UDP;
 1616                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1617                         ifp->if_hwassist = sc->sc_csum_features;
 1618                 sc->sc_ifflags = ifp->if_flags;
 1619                 HME_UNLOCK(sc);
 1620                 break;
 1621 
 1622         case SIOCADDMULTI:
 1623         case SIOCDELMULTI:
 1624                 HME_LOCK(sc);
 1625                 hme_setladrf(sc, 1);
 1626                 HME_UNLOCK(sc);
 1627                 error = 0;
 1628                 break;
 1629         case SIOCGIFMEDIA:
 1630         case SIOCSIFMEDIA:
 1631                 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 1632                 break;
 1633         case SIOCSIFCAP:
 1634                 HME_LOCK(sc);
 1635                 ifp->if_capenable = ifr->ifr_reqcap;
 1636                 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 1637                         ifp->if_hwassist = sc->sc_csum_features;
 1638                 else
 1639                         ifp->if_hwassist = 0;
 1640                 HME_UNLOCK(sc);
 1641                 break;
 1642         default:
 1643                 error = ether_ioctl(ifp, cmd, data);
 1644                 break;
 1645         }
 1646 
 1647         return (error);
 1648 }
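
The SIOCSIFCAP case above is the hook that lets ifconfig(8) toggle the offload
features the driver advertises (e.g. "ifconfig hme0 txcsum").  The sketch below
illustrates the userland side on FreeBSD: it reads the current capability bits
and requests TX checksum offload on top of them.  The interface name is only an
example and error handling is minimal.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>

#include <err.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
        struct ifreq ifr;
        int s;

        if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
                err(1, "socket");
        memset(&ifr, 0, sizeof(ifr));
        strlcpy(ifr.ifr_name, "hme0", sizeof(ifr.ifr_name));

        /* Read the supported and currently enabled capability bits. */
        if (ioctl(s, SIOCGIFCAP, &ifr) == -1)
                err(1, "SIOCGIFCAP");
        printf("enabled capabilities: 0x%x\n", (unsigned int)ifr.ifr_curcap);

        /* Request TX checksum offload in addition to what is enabled. */
        ifr.ifr_reqcap = ifr.ifr_curcap | IFCAP_TXCSUM;
        if (ioctl(s, SIOCSIFCAP, &ifr) == -1)
                err(1, "SIOCSIFCAP");

        close(s);
        return (0);
}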
 1649 
 1650 /*
 1651  * Set up the logical address filter.
 1652  */
 1653 static void
 1654 hme_setladrf(struct hme_softc *sc, int reenable)
 1655 {
 1656         struct ifnet *ifp = sc->sc_ifp;
 1657         struct ifmultiaddr *inm;
 1658         u_int32_t crc;
 1659         u_int32_t hash[4];
 1660         u_int32_t macc;
 1661 
 1662         HME_LOCK_ASSERT(sc, MA_OWNED);
 1663         /* Clear the hash table. */
 1664         hash[3] = hash[2] = hash[1] = hash[0] = 0;
 1665 
 1666         /* Get the current RX configuration. */
 1667         macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 1668 
 1669         /*
 1670          * Turn off promiscuous mode, promiscuous group mode (all multicast),
 1671          * and the hash filter.  The appropriate bit is re-enabled below,
 1672          * depending on the interface flags.
 1673          */
 1674         macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);
 1675 
 1676         /*
 1677          * Disable the receiver while changing its state, as the documentation
 1678          * mandates.
 1679          * We then must wait until the bit clears in the register. This should
 1680          * take at most 3.5ms.
 1681          */
 1682         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1683             HME_MAC_RXCFG_ENABLE, 0))
 1684                 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
 1685         /* Disable the hash filter before writing to the filter registers. */
 1686         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 1687             HME_MAC_RXCFG_HENABLE, 0))
 1688                 device_printf(sc->sc_dev, "cannot disable hash filter\n");
 1689 
 1690         /* Make the RX MAC really SIMPLEX. */
 1691         macc |= HME_MAC_RXCFG_ME;
 1692         if (reenable)
 1693                 macc |= HME_MAC_RXCFG_ENABLE;
 1694         else
 1695                 macc &= ~HME_MAC_RXCFG_ENABLE;
 1696 
 1697         if ((ifp->if_flags & IFF_PROMISC) != 0) {
 1698                 macc |= HME_MAC_RXCFG_PMISC;
 1699                 goto chipit;
 1700         }
 1701         if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
 1702                 macc |= HME_MAC_RXCFG_PGRP;
 1703                 goto chipit;
 1704         }
 1705 
 1706         macc |= HME_MAC_RXCFG_HENABLE;
 1707 
 1708         /*
 1709          * Set up multicast address filter by passing all multicast addresses
 1711          * through a CRC generator and then using the 6 high-order bits as an
 1712          * index into the 64-bit logical address filter.  The two high-order
 1713          * bits select the word, while the remaining four bits select the bit
 1714          * within that word.
 1714          */
 1715 
 1716         if_maddr_rlock(ifp);
 1717         TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
 1718                 if (inm->ifma_addr->sa_family != AF_LINK)
 1719                         continue;
 1720                 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
 1721                     inm->ifma_addr), ETHER_ADDR_LEN);
 1722 
 1723                 /* Just want the 6 most significant bits. */
 1724                 crc >>= 26;
 1725 
 1726                 /* Set the corresponding bit in the filter. */
 1727                 hash[crc >> 4] |= 1 << (crc & 0xf);
 1728         }
 1729         if_maddr_runlock(ifp);
 1730 
 1731 chipit:
 1732         /* Now load the hash table into the chip */
 1733         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 1734         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 1735         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 1736         HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 1737         if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 1738             macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
 1739             HME_MAC_RXCFG_ME)))
 1740                 device_printf(sc->sc_dev, "cannot configure RX MAC\n");
 1741 }
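
To see where a particular group address lands in the filter, the computation
can be reproduced outside the kernel.  The sketch below assumes
ether_crc32_le() is the usual bit-reflected CRC-32 (initial value 0xffffffff,
reflected polynomial 0xedb88320, no final inversion); the example multicast
address is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Bit-reflected CRC-32 over a buffer, little-endian bit order. */
static uint32_t
crc32_le(const uint8_t *buf, size_t len)
{
        uint32_t crc = 0xffffffff;

        for (size_t i = 0; i < len; i++) {
                uint8_t data = buf[i];

                for (int bit = 0; bit < 8; bit++, data >>= 1)
                        crc = (crc >> 1) ^
                            (((crc ^ data) & 1) ? 0xedb88320 : 0);
        }
        return (crc);
}

int
main(void)
{
        /* Example group address: all-hosts, 01:00:5e:00:00:01. */
        uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t crc = crc32_le(addr, sizeof(addr));

        crc >>= 26;             /* keep the 6 high-order bits */
        printf("HASHTAB%u bit %u\n",
            (unsigned int)(crc >> 4), (unsigned int)(crc & 0xf));
        return (0);
}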
